git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
tcp: add data-races annotations around tp->reordering, tp->snd_cwnd
authorEric Dumazet <edumazet@google.com>
Thu, 16 Apr 2026 20:03:08 +0000 (20:03 +0000)
committerJakub Kicinski <kuba@kernel.org>
Sat, 18 Apr 2026 18:10:12 +0000 (11:10 -0700)
tcp_get_timestamping_opt_stats() intentionally runs lockless, so we must
add READ_ONCE(), WRITE_ONCE() and data_race() annotations to keep KCSAN happy.

Fixes: bb7c19f96012 ("tcp: add related fields into SCM_TIMESTAMPING_OPT_STATS")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260416200319.3608680-4-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/tcp.h
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_metrics.c

index 674af493882c802ebe03e0cac6e40b7c704aa0de..ecbadcb3a7446cb18c245e670ba49ff574dfaff7 100644 (file)
@@ -1513,7 +1513,7 @@ static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
 static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
 {
        WARN_ON_ONCE((int)val <= 0);
-       tp->snd_cwnd = val;
+       WRITE_ONCE(tp->snd_cwnd, val);
 }
 
 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
index e39e0734d958f39aa83a33f5c608ce3b94232fb1..24ba80d244b1fb69102b587b568cebe7b78dff9d 100644 (file)
@@ -4445,13 +4445,13 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
        rate64 = tcp_compute_delivery_rate(tp);
        nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
 
-       nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
-       nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
-       nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
+       nla_put_u32(stats, TCP_NLA_SND_CWND, READ_ONCE(tp->snd_cwnd));
+       nla_put_u32(stats, TCP_NLA_REORDERING, READ_ONCE(tp->reordering));
+       nla_put_u32(stats, TCP_NLA_MIN_RTT, data_race(tcp_min_rtt(tp)));
 
        nla_put_u8(stats, TCP_NLA_RECUR_RETRANS,
                   READ_ONCE(inet_csk(sk)->icsk_retransmits));
-       nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
+       nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, data_race(!!tp->rate_app_limited));
        nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
        nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
        nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
index 021f745747c59d8b9e200c5954af7807a4d08866..6bb6bf049a35ac91fd53e3e66691f64fc4c93648 100644 (file)
@@ -1293,8 +1293,9 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
                         tp->sacked_out,
                         tp->undo_marker ? tp->undo_retrans : 0);
 #endif
-               tp->reordering = min_t(u32, (metric + mss - 1) / mss,
-                                      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
+               WRITE_ONCE(tp->reordering,
+                          min_t(u32, (metric + mss - 1) / mss,
+                                READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)));
        }
 
        /* This exciting event is worth to be remembered. 8) */
@@ -2439,8 +2440,9 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
        if (!tcp_limit_reno_sacked(tp))
                return;
 
-       tp->reordering = min_t(u32, tp->packets_out + addend,
-                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
+       WRITE_ONCE(tp->reordering,
+                  min_t(u32, tp->packets_out + addend,
+                        READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)));
        tp->reord_seen++;
        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
 }
@@ -2579,8 +2581,8 @@ void tcp_enter_loss(struct sock *sk)
        reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
        if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
            tp->sacked_out >= reordering)
-               tp->reordering = min_t(unsigned int, tp->reordering,
-                                      reordering);
+               WRITE_ONCE(tp->reordering,
+                          min_t(unsigned int, tp->reordering, reordering));
 
        tcp_set_ca_state(sk, TCP_CA_Loss);
        tp->high_seq = tp->snd_nxt;
index 06b1d5d3b6df7b8fa3fc631b8662160c8729a9df..7a9d6d9006f651e91054d3369b47758a6c35253b 100644 (file)
@@ -496,7 +496,7 @@ void tcp_init_metrics(struct sock *sk)
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val)
-               tp->reordering = val;
+               WRITE_ONCE(tp->reordering, val);
 
        crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        rcu_read_unlock();