tcp: move tcp_rate_skb_delivered() to tcp_input.c
author     Eric Dumazet <edumazet@google.com>
           Sun, 18 Jan 2026 12:32:04 +0000 (12:32 +0000)
committer  Jakub Kicinski <kuba@kernel.org>
           Wed, 21 Jan 2026 03:03:09 +0000 (19:03 -0800)
tcp_rate_skb_delivered() is only called from tcp_input.c.
Move it there and make it static.

Both gcc and clang (auto)inline it; TCP performance
improves at a small space cost.

$ scripts/bloat-o-meter -t vmlinux.old vmlinux.new
add/remove: 0/2 grow/shrink: 3/0 up/down: 509/-187 (322)
Function                                     old     new   delta
tcp_sacktag_walk                            1682    1867    +185
tcp_ack                                     5230    5405    +175
tcp_shifted_skb                              437     586    +149
__pfx_tcp_rate_skb_delivered                  16       -     -16
tcp_rate_skb_delivered                       171       -    -171
Total: Before=22566192, After=22566514, chg +0.00%

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Link: https://patch.msgid.link/20260118123204.2315993-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
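
The win comes from the usual single-caller pattern: once the definition lives in
its only caller's translation unit and is static, gcc and clang can inline it
without LTO. A minimal sketch of that pattern, using hypothetical names
(do_helper(), do_caller()) rather than the kernel sources touched here:

/* Before the move, do_helper() was defined in its own .c file and declared
 * in a shared header, so a non-LTO build had to emit a real call here.
 *
 * After the move, the single caller owns the definition; "static" drops the
 * external symbol and lets gcc/clang inline the body at the call site.
 */
static void do_helper(int x)
{
	/* ... work on x ... */
	(void)x;
}

void do_caller(void)
{
	do_helper(1);	/* typically inlined at -O2: no call/return overhead */
}

The bloat-o-meter output above shows the expected shape of that trade-off: the
three call sites grow by roughly the size of the old function body, while
tcp_rate_skb_delivered and its __pfx_ prefix symbol disappear.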
include/net/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_rate.c

index 15f9b20f851fe322f4417ff403c3965436aa3f9f..25143f156957288f5b8674d4d27b805e92c592c8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1356,8 +1356,6 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
 
 /* From tcp_rate.c */
-void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
-                           struct rate_sample *rs);
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
                  bool is_sack_reneg, struct rate_sample *rs);
 void tcp_rate_check_app_limited(struct sock *sk);
index 198f8a0d37be04f78da9268a230c9494b50b672a..dc8e256321b03da0ef97b4512d2cb5f202501dfa 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1637,6 +1637,50 @@ static u8 tcp_sacktag_one(struct sock *sk,
        return sacked;
 }
 
+/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
+ * delivery information when the skb was last transmitted.
+ *
+ * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
+ * called multiple times. We favor the information from the most recently
+ * sent skb, i.e., the skb with the most recently sent time and the highest
+ * sequence.
+ */
+static void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+                                  struct rate_sample *rs)
+{
+       struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+       struct tcp_sock *tp = tcp_sk(sk);
+       u64 tx_tstamp;
+
+       if (!scb->tx.delivered_mstamp)
+               return;
+
+       tx_tstamp = tcp_skb_timestamp_us(skb);
+       if (!rs->prior_delivered ||
+           tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
+                              scb->end_seq, rs->last_end_seq)) {
+               rs->prior_delivered_ce  = scb->tx.delivered_ce;
+               rs->prior_delivered  = scb->tx.delivered;
+               rs->prior_mstamp     = scb->tx.delivered_mstamp;
+               rs->is_app_limited   = scb->tx.is_app_limited;
+               rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
+               rs->last_end_seq     = scb->end_seq;
+
+               /* Record send time of most recently ACKed packet: */
+               tp->first_tx_mstamp  = tx_tstamp;
+               /* Find the duration of the "send phase" of this window: */
+               rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
+                                                    scb->tx.first_tx_mstamp);
+
+       }
+       /* Mark off the skb delivered once it's sacked to avoid being
+        * used again when it's cumulatively acked. For acked packets
+        * we don't need to reset since it'll be freed soon.
+        */
+       if (scb->sacked & TCPCB_SACKED_ACKED)
+               scb->tx.delivered_mstamp = 0;
+}
+
 /* Shift newly-SACKed bytes from this skb to the immediately previous
  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
  */
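
The selection of the "most recently sent" skb in the hunk above relies on
tcp_skb_sent_after(), which is declared in the TCP headers and not part of this
diff. Per the comment in the added code, its expected semantics are a
send-timestamp comparison with the end sequence as a tie-break; a rough,
hypothetically named sketch (skb_sent_after_sketch) of that predicate, assuming
the kernel's after() sequence-comparison helper:

/* True if the skb sent at t1 with end sequence seq1 was sent after the skb
 * sent at t2 with end sequence seq2: the later send timestamp wins, and the
 * higher end sequence breaks a timestamp tie.
 */
static inline bool skb_sent_after_sketch(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}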
index 98eb346f986ef24969f804c3b55acbf60d2ec299..f0f2ef377043d797eb0270be1f54e65b21673f02 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
  * ready to send in the write queue.
  */
 
-/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
- * delivery information when the skb was last transmitted.
- *
- * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
- * called multiple times. We favor the information from the most recently
- * sent skb, i.e., the skb with the most recently sent time and the highest
- * sequence.
- */
-void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
-                           struct rate_sample *rs)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-       u64 tx_tstamp;
-
-       if (!scb->tx.delivered_mstamp)
-               return;
-
-       tx_tstamp = tcp_skb_timestamp_us(skb);
-       if (!rs->prior_delivered ||
-           tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
-                              scb->end_seq, rs->last_end_seq)) {
-               rs->prior_delivered_ce  = scb->tx.delivered_ce;
-               rs->prior_delivered  = scb->tx.delivered;
-               rs->prior_mstamp     = scb->tx.delivered_mstamp;
-               rs->is_app_limited   = scb->tx.is_app_limited;
-               rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
-               rs->last_end_seq     = scb->end_seq;
-
-               /* Record send time of most recently ACKed packet: */
-               tp->first_tx_mstamp  = tx_tstamp;
-               /* Find the duration of the "send phase" of this window: */
-               rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
-                                                    scb->tx.first_tx_mstamp);
-
-       }
-       /* Mark off the skb delivered once it's sacked to avoid being
-        * used again when it's cumulatively acked. For acked packets
-        * we don't need to reset since it'll be freed soon.
-        */
-       if (scb->sacked & TCPCB_SACKED_ACKED)
-               scb->tx.delivered_mstamp = 0;
-}
-
 /* Update the connection delivery information and generate a rate sample. */
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
                  bool is_sack_reneg, struct rate_sample *rs)