tcp: remove tcp_reset_xmit_timer() @max_when argument
author    Eric Dumazet <edumazet@google.com>
          Fri, 7 Feb 2025 15:28:26 +0000 (15:28 +0000)
committer Paolo Abeni <pabeni@redhat.com>
          Tue, 11 Feb 2025 12:07:59 +0000 (13:07 +0100)
All callers use TCP_RTO_MAX, so we can factorize this constant
into the helper; it will become a variable soon.
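
In other words, every call site passes the same upper bound, so the
bound can move into the wrapper. A minimal before/after sketch of one
call site (taken from tcp_rearm_rto() in the diff below):

	/* Before: every caller repeats the TCP_RTO_MAX upper bound. */
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX);

	/* After: the wrapper supplies TCP_RTO_MAX itself. */
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto);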

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
include/net/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c

diff --git a/include/net/tcp.h b/include/net/tcp.h
index bb7edf0e72aa077ed4de02c6e7cd7048976d8a1e..8678a2d37ef8d2108c1f9288937b4e7bf0f32055 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1423,11 +1423,10 @@ static inline unsigned long tcp_pacing_delay(const struct sock *sk)
 
 static inline void tcp_reset_xmit_timer(struct sock *sk,
                                        const int what,
-                                       unsigned long when,
-                                       const unsigned long max_when)
+                                       unsigned long when)
 {
        inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
-                                 max_when);
+                                 TCP_RTO_MAX);
 }
 
 /* Something is really bad, we could not queue an additional packet,
@@ -1456,7 +1455,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
 {
        if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
                tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-                                    tcp_probe0_base(sk), TCP_RTO_MAX);
+                                    tcp_probe0_base(sk));
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 286f15e4994a96ceae9386e76c127e76caf79220..1a3ce47b8c750cfd6e921b89e4b0866f20e06489 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3282,8 +3282,7 @@ void tcp_rearm_rto(struct sock *sk)
                         */
                        rto = usecs_to_jiffies(max_t(int, delta_us, 1));
                }
-               tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
-                                    TCP_RTO_MAX);
+               tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto);
        }
 }
 
@@ -3563,7 +3562,7 @@ static void tcp_ack_probe(struct sock *sk)
                unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
                when = tcp_clamp_probe0_to_user_timeout(sk, when);
-               tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
+               tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when);
        }
 }
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ef9f6172680f5f3a9384132962d6e34cfbf83f14..093476fb2b2e2266e6047418a72815066cb63067 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2910,7 +2910,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
        if (rto_delta_us > 0)
                timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
-       tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
+       tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout);
        return true;
 }
 
@@ -3544,8 +3544,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
        }
        if (rearm_timer)
                tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-                                    inet_csk(sk)->icsk_rto,
-                                    TCP_RTO_MAX);
+                                    inet_csk(sk)->icsk_rto);
 }
 
 /* We allow to exceed memory limits for FIN packets to expedite
@@ -4401,7 +4400,7 @@ void tcp_send_probe0(struct sock *sk)
        }
 
        timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
-       tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
+       tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout);
 }
 
 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
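
With the bound factorized into tcp_reset_xmit_timer(), the announced
follow-up ("it will become a variable soon") only has to touch the
wrapper. A minimal sketch of that future shape, assuming a hypothetical
tcp_rto_max(sk) helper that is not part of this patch:

	static inline void tcp_reset_xmit_timer(struct sock *sk,
						const int what,
						unsigned long when)
	{
		/* tcp_rto_max(sk) is an assumed future helper replacing
		 * the TCP_RTO_MAX constant; everything else matches the
		 * wrapper as it stands after this patch.
		 */
		inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
					  tcp_rto_max(sk));
	}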