git.ipfire.org Git - thirdparty/linux.git/commitdiff
tcp: reduce calls to tcp_schedule_loss_probe()
authorEric Dumazet <edumazet@google.com>
Mon, 23 Feb 2026 11:35:01 +0000 (11:35 +0000)
committerJakub Kicinski <kuba@kernel.org>
Wed, 25 Feb 2026 01:44:33 +0000 (17:44 -0800)
For RPC workloads, we alternate tcp_schedule_loss_probe() calls from
the output path and from the input path, with the tp->packets_out value
oscillating between non-zero and zero, leading to poor branch prediction.

Move tp->packets_out check from tcp_schedule_loss_probe() to
tcp_set_xmit_timer().

We avoid one call to tcp_schedule_loss_probe() from tcp_ack()
path for typical RPC workloads, while improving branch prediction.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20260223113501.4070245-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c

index e7b41abb82aad33d8cab4fcfa989cc4771149b41..6c3f1d0314446966d0ec4e8efb0b3d83463990d9 100644 (file)
@@ -3552,7 +3552,7 @@ void tcp_rearm_rto(struct sock *sk)
 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
 static void tcp_set_xmit_timer(struct sock *sk)
 {
-       if (!tcp_schedule_loss_probe(sk, true))
+       if (!tcp_sk(sk)->packets_out || !tcp_schedule_loss_probe(sk, true))
                tcp_rearm_rto(sk);
 }
 
index 1ef419c66a0ebad6422be73a57afaba044467148..46bd48cf776a6a37e6ab2664245cd1e35a88d4f8 100644 (file)
@@ -3135,7 +3135,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
         * not in loss recovery, that are either limited by cwnd or application.
         */
        if ((early_retrans != 3 && early_retrans != 4) ||
-           !tp->packets_out || !tcp_is_sack(tp) ||
+           !tcp_is_sack(tp) ||
            (icsk->icsk_ca_state != TCP_CA_Open &&
             icsk->icsk_ca_state != TCP_CA_CWR))
                return false;