tcp: add a @pace_delay parameter to tcp_reset_xmit_timer()
author Eric Dumazet <edumazet@google.com>
Fri, 7 Feb 2025 15:28:27 +0000 (15:28 +0000)
committer Paolo Abeni <pabeni@redhat.com>
Tue, 11 Feb 2025 12:07:59 +0000 (13:07 +0100)
We want to factorize calls to inet_csk_reset_xmit_timer(),
to ease the upcoming TCP_RTO_MAX change.

Current users want to add tcp_pacing_delay(sk)
to the timeout.

Remaining calls to inet_csk_reset_xmit_timer()
do not add the pacing delay. A following patch
will convert them, passing false for @pace_delay.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
include/net/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
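
For reference, a minimal sketch of the follow-up conversion described in the commit message: a remaining inet_csk_reset_xmit_timer() caller switches to tcp_reset_xmit_timer() and passes false for @pace_delay, so tcp_pacing_delay(sk) is not added and the old behaviour is preserved. The call site and timeout value shown here are illustrative assumptions, not taken from the later patch.

	/* Before: direct icsk call, no pacing delay added (hypothetical call site). */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* After: go through the factorized helper; pace_delay == false keeps
	 * the old behaviour by not adding tcp_pacing_delay(sk).
	 */
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			     TCP_TIMEOUT_INIT, false);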

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8678a2d37ef8d2108c1f9288937b4e7bf0f32055..56557b0104e3f0984125532332a73e067ef1a118 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1423,10 +1423,12 @@ static inline unsigned long tcp_pacing_delay(const struct sock *sk)
 
 static inline void tcp_reset_xmit_timer(struct sock *sk,
                                        const int what,
-                                       unsigned long when)
+                                       unsigned long when,
+                                       bool pace_delay)
 {
-       inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
-                                 TCP_RTO_MAX);
+       if (pace_delay)
+               when += tcp_pacing_delay(sk);
+       inet_csk_reset_xmit_timer(sk, what, when, TCP_RTO_MAX);
 }
 
 /* Something is really bad, we could not queue an additional packet,
@@ -1455,7 +1457,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
 {
        if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
                tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-                                    tcp_probe0_base(sk));
+                                    tcp_probe0_base(sk), true);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1a3ce47b8c750cfd6e921b89e4b0866f20e06489..5c9ed7657c9f0242002d6e91881af081e8c8eec7 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3282,7 +3282,7 @@ void tcp_rearm_rto(struct sock *sk)
                         */
                        rto = usecs_to_jiffies(max_t(int, delta_us, 1));
                }
-               tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto);
+               tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, true);
        }
 }
 
@@ -3562,7 +3562,7 @@ static void tcp_ack_probe(struct sock *sk)
                unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
                when = tcp_clamp_probe0_to_user_timeout(sk, when);
-               tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when);
+               tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, true);
        }
 }
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 093476fb2b2e2266e6047418a72815066cb63067..27438ca2d5e68434c7a36d3c3a1ed6578c4e7d27 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2910,7 +2910,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
        if (rto_delta_us > 0)
                timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
-       tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout);
+       tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, true);
        return true;
 }
 
@@ -3544,7 +3544,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
        }
        if (rearm_timer)
                tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-                                    inet_csk(sk)->icsk_rto);
+                                    inet_csk(sk)->icsk_rto, true);
 }
 
 /* We allow to exceed memory limits for FIN packets to expedite
@@ -4400,7 +4400,7 @@ void tcp_send_probe0(struct sock *sk)
        }
 
        timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
-       tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout);
+       tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, true);
 }
 
 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)