From: Eric Dumazet
Date: Fri, 23 Jan 2026 11:16:05 +0000 (+0000)
Subject: tcp: move sk_forced_mem_schedule() to tcp.c
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=a18056a6c11cab562a77d16b5931f6fdf55e7ec9;p=thirdparty%2Fkernel%2Flinux.git

tcp: move sk_forced_mem_schedule() to tcp.c

TCP fast path can (auto)inline this helper, instead of (auto)inlining
it from tcp_send_fin().

No change of overall code size, but tcp_sendmsg() is faster.

$ scripts/bloat-o-meter -t vmlinux.old vmlinux.new
add/remove: 0/0 grow/shrink: 1/1 up/down: 141/-140 (1)
Function                                     old     new   delta
tcp_stream_alloc_skb                         216     357    +141
tcp_send_fin                                 688     548    -140
Total: Before=22236729, After=22236730, chg +0.00%

BTW, we might change tcp_send_fin() to use tcp_stream_alloc_skb().

Signed-off-by: Eric Dumazet
Reviewed-by: Neal Cardwell
Link: https://patch.msgid.link/20260123111605.4089200-1-edumazet@google.com
Signed-off-by: Paolo Abeni
---

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 22b7ec192272..d95b5b8c1ffc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -915,6 +915,33 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 }
 EXPORT_IPV6_MOD(tcp_splice_read);
 
+/* We allow to exceed memory limits for FIN packets to expedite
+ * connection tear down and (memory) recovery.
+ * Otherwise tcp_send_fin() could be tempted to either delay FIN
+ * or even be forced to close flow without any FIN.
+ * In general, we want to allow one skb per socket to avoid hangs
+ * with edge trigger epoll()
+ */
+void sk_forced_mem_schedule(struct sock *sk, int size)
+{
+	int delta, amt;
+
+	delta = size - sk->sk_forward_alloc;
+	if (delta <= 0)
+		return;
+
+	amt = sk_mem_pages(delta);
+	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
+
+	if (mem_cgroup_sk_enabled(sk))
+		mem_cgroup_sk_charge(sk, amt, gfp_memcg_charge() | __GFP_NOFAIL);
+
+	if (sk->sk_bypass_prot_mem)
+		return;
+
+	sk_memory_allocated_add(sk, amt);
+}
+
 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
 				     bool force_schedule)
 {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 256b669e8d3b..597e888af36d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3767,33 +3767,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 				 inet_csk(sk)->icsk_rto, true);
 }
 
-/* We allow to exceed memory limits for FIN packets to expedite
- * connection tear down and (memory) recovery.
- * Otherwise tcp_send_fin() could be tempted to either delay FIN
- * or even be forced to close flow without any FIN.
- * In general, we want to allow one skb per socket to avoid hangs
- * with edge trigger epoll()
- */
-void sk_forced_mem_schedule(struct sock *sk, int size)
-{
-	int delta, amt;
-
-	delta = size - sk->sk_forward_alloc;
-	if (delta <= 0)
-		return;
-
-	amt = sk_mem_pages(delta);
-	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
-
-	if (mem_cgroup_sk_enabled(sk))
-		mem_cgroup_sk_charge(sk, amt, gfp_memcg_charge() | __GFP_NOFAIL);
-
-	if (sk->sk_bypass_prot_mem)
-		return;
-
-	sk_memory_allocated_add(sk, amt);
-}
-
 /* Send a FIN. The caller locks the socket for us.
  * We should try to send a FIN packet really hard, but eventually give up.
  */
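
For context on the bloat-o-meter numbers: tcp_stream_alloc_skb() grows
because the helper body can now be inlined at its call site in the same
translation unit. A rough sketch of that call site follows; it is an
illustration, not the exact tree contents, and assumes the usual shape
where sk_wmem_schedule() is the ordinary counterpart that may fail under
memory pressure:

	/* Sketch: with sk_forced_mem_schedule() now defined earlier in
	 * net/ipv4/tcp.c, the compiler can auto-inline it here, on the
	 * tcp_sendmsg() fast path.
	 */
	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		if (force_schedule) {
			/* forced path: the charge never fails */
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		/* ... reserve headroom and return skb on success ... */
	}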
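
The helper itself just rounds the shortfall between the skb's size and
the socket's forward-alloc reserve up to whole pages. Below is a minimal,
compilable userspace sketch of that arithmetic; the PAGE_SHIFT value, the
sample numbers, and the stripped-down struct are illustrative stand-ins,
and the cgroup charge and global protocol accounting of the real helper
are omitted:

#include <stdio.h>

#define PAGE_SHIFT	12			/* illustrative: 4 KB pages */
#define PAGE_SIZE	(1 << PAGE_SHIFT)

struct sock {				/* stand-in for the kernel's struct sock */
	int sk_forward_alloc;		/* bytes already reserved for this socket */
};

/* Round a byte shortfall up to whole pages, as sk_mem_pages() does. */
static int sk_mem_pages(int amt)
{
	return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* Arithmetic core of sk_forced_mem_schedule(): charge unconditionally
 * and never fail, so a FIN skb can always be accounted for.
 */
static void sk_forced_mem_schedule(struct sock *sk, int size)
{
	int delta, amt;

	delta = size - sk->sk_forward_alloc;
	if (delta <= 0)			/* reserve already covers the skb */
		return;

	amt = sk_mem_pages(delta);	/* pages to charge, rounded up */
	sk->sk_forward_alloc += amt << PAGE_SHIFT;
}

int main(void)
{
	struct sock sk = { .sk_forward_alloc = 1000 };

	sk_forced_mem_schedule(&sk, 5000);	/* skb truesize of 5000 bytes */
	/* delta = 4000 -> 1 page -> forward_alloc = 1000 + 4096 = 5096 */
	printf("forward_alloc = %d\n", sk.sk_forward_alloc);
	return 0;
}

Note the forced semantics: unlike the ordinary scheduling path, nothing
here can fail, which matches the comment's goal of never delaying or
dropping a FIN because of memory limits.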