git.ipfire.org Git - thirdparty/linux.git/commitdiff
tcp: accecn: AccECN needs to know delivered bytes
author: Ilpo Järvinen <ij@kernel.org>
Tue, 16 Sep 2025 08:24:28 +0000 (10:24 +0200)
committer: Paolo Abeni <pabeni@redhat.com>
Thu, 18 Sep 2025 06:47:52 +0000 (08:47 +0200)
AccECN byte counter estimation requires delivered bytes
which can be calculated while processing SACK blocks and
cumulative ACK. The delivered bytes will be used to estimate
the byte counters between AccECN option (on ACKs w/o the
option).

Accurate ECN does not depend on SACK to function; however,
the calculation is more accurate when SACK is available.

Signed-off-by: Ilpo Järvinen <ij@kernel.org>
Signed-off-by: Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20250916082434.100722-5-chia-yu.chang@nokia-bell-labs.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
net/ipv4/tcp_input.c

index 636a63383412ac1780f4d8d78673e0b52e3e8b9b..8b48a3c009456bc4d8011afba1d6a7254b054126 100644 (file)
@@ -1050,6 +1050,7 @@ struct tcp_sacktag_state {
        u64     last_sackt;
        u32     reord;
        u32     sack_delivered;
+       u32     delivered_bytes;
        int     flag;
        unsigned int mss_now;
        struct rate_sample *rate;
@@ -1411,7 +1412,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 static u8 tcp_sacktag_one(struct sock *sk,
                          struct tcp_sacktag_state *state, u8 sacked,
                          u32 start_seq, u32 end_seq,
-                         int dup_sack, int pcount,
+                         int dup_sack, int pcount, u32 plen,
                          u64 xmit_time)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -1471,6 +1472,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
                tp->sacked_out += pcount;
                /* Out-of-order packets delivered */
                state->sack_delivered += pcount;
+               state->delivered_bytes += plen;
        }
 
        /* D-SACK. We can detect redundant retransmission in S|R and plain R
@@ -1507,7 +1509,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
         * tcp_highest_sack_seq() when skb is highest_sack.
         */
        tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
-                       start_seq, end_seq, dup_sack, pcount,
+                       start_seq, end_seq, dup_sack, pcount, skb->len,
                        tcp_skb_timestamp_us(skb));
        tcp_rate_skb_delivered(sk, skb, state->rate);
 
@@ -1792,6 +1794,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                                                TCP_SKB_CB(skb)->end_seq,
                                                dup_sack,
                                                tcp_skb_pcount(skb),
+                                               skb->len,
                                                tcp_skb_timestamp_us(skb));
                        tcp_rate_skb_delivered(sk, skb, state->rate);
                        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
@@ -3300,6 +3303,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
                if (sacked & TCPCB_SACKED_ACKED) {
                        tp->sacked_out -= acked_pcount;
+                       /* snd_una delta covers these skbs */
+                       sack->delivered_bytes -= skb->len;
                } else if (tcp_is_sack(tp)) {
                        tcp_count_delivered(tp, acked_pcount, ece_ack);
                        if (!tcp_skb_spurious_retrans(tp, skb))
@@ -3396,6 +3401,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
                        if (before(reord, prior_fack))
                                tcp_check_sack_reordering(sk, reord, 0);
                }
+
+               sack->delivered_bytes = (skb ?
+                                        TCP_SKB_CB(skb)->seq : tp->snd_una) -
+                                        prior_snd_una;
        } else if (skb && rtt_update && sack_rtt_us >= 0 &&
                   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
                                                    tcp_skb_timestamp_us(skb))) {
@@ -3858,6 +3867,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        sack_state.first_sackt = 0;
        sack_state.rate = &rs;
        sack_state.sack_delivered = 0;
+       sack_state.delivered_bytes = 0;
 
        /* We very likely will need to access rtx queue. */
        prefetch(sk->tcp_rtx_queue.rb_node);