git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
tcp: annotate data-races around tp->delivered and tp->delivered_ce
author: Eric Dumazet <edumazet@google.com>
Thu, 16 Apr 2026 20:03:10 +0000 (20:03 +0000)
committer: Jakub Kicinski <kuba@kernel.org>
Sat, 18 Apr 2026 18:10:12 +0000 (11:10 -0700)
tcp_get_timestamping_opt_stats() intentionally runs lockless, we must
add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy.

Fixes: feb5f2ec6464 ("tcp: export packets delivery info")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260416200319.3608680-6-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/tcp_ecn.h
net/ipv4/tcp.c
net/ipv4/tcp_input.c

index e9a933641636e11902a84a595672cd56a551f305..865d5c5a7718dbc7d6db1963261889fd44625bdc 100644 (file)
@@ -181,7 +181,7 @@ static inline void tcp_accecn_third_ack(struct sock *sk,
                    tcp_accecn_validate_syn_feedback(sk, ace, sent_ect)) {
                        if ((tcp_accecn_extract_syn_ect(ace) == INET_ECN_CE) &&
                            !tp->delivered_ce)
-                               tp->delivered_ce++;
+                               WRITE_ONCE(tp->delivered_ce, 1);
                }
                break;
        }
index 802a9ea05211f8eab30b6f937a459a270476974d..0aabd02d44967dae3e569702f76037beb45e5de8 100644 (file)
@@ -4453,8 +4453,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
                   READ_ONCE(inet_csk(sk)->icsk_retransmits));
        nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, data_race(!!tp->rate_app_limited));
        nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, READ_ONCE(tp->snd_ssthresh));
-       nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
-       nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
+       nla_put_u32(stats, TCP_NLA_DELIVERED, READ_ONCE(tp->delivered));
+       nla_put_u32(stats, TCP_NLA_DELIVERED_CE, READ_ONCE(tp->delivered_ce));
 
        nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
        nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
index c6361447535f0a2b72eccb6fede4618471e38ae5..63ff89210a72fbf5710279c41010d3f6e734e522 100644 (file)
@@ -476,14 +476,14 @@ static bool tcp_accecn_process_option(struct tcp_sock *tp,
 
 static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count)
 {
-       tp->delivered_ce += ecn_count;
+       WRITE_ONCE(tp->delivered_ce, tp->delivered_ce + ecn_count);
 }
 
 /* Updates the delivered and delivered_ce counts */
 static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
                                bool ece_ack)
 {
-       tp->delivered += delivered;
+       WRITE_ONCE(tp->delivered, tp->delivered + delivered);
        if (tcp_ecn_mode_rfc3168(tp) && ece_ack)
                tcp_count_delivered_ce(tp, delivered);
 }
@@ -6779,7 +6779,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
                /* SYN-data is counted as two separate packets in tcp_ack() */
                if (tp->delivered > 1)
-                       --tp->delivered;
+                       WRITE_ONCE(tp->delivered, tp->delivered - 1);
        }
 
        tcp_fastopen_add_skb(sk, synack);
@@ -7212,7 +7212,7 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
        SKB_DR_SET(reason, NOT_SPECIFIED);
        switch (sk->sk_state) {
        case TCP_SYN_RECV:
-               tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */
+               WRITE_ONCE(tp->delivered, tp->delivered + 1); /* SYN-ACK delivery isn't tracked in tcp_ack */
                if (!tp->srtt_us)
                        tcp_synack_rtt_meas(sk, req);