tcp: minor optimization in tcp_add_backlog()
author     Eric Dumazet <edumazet@google.com>
           Mon, 15 Nov 2021 19:02:30 +0000 (11:02 -0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 16 Jun 2024 11:28:35 +0000 (13:28 +0200)
[ Upstream commit d519f350967a60b85a574ad8aeac43f2b4384746 ]

If a packet is going to be coalesced, the sk_sndbuf/sk_rcvbuf values
are not used. Defer their access to the point where we need them.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: ec00ed472bdb ("tcp: avoid premature drops in tcp_add_backlog()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
net/ipv4/tcp_ipv4.c

index 0dd917c5a7da6735ee1aba7ddb8d03645087cc9f..1567072071633349cc9e06f56374b81797cf505e 100644
@@ -1678,8 +1678,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-       u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
-       u32 tail_gso_size, tail_gso_segs;
+       u32 limit, tail_gso_size, tail_gso_segs;
        struct skb_shared_info *shinfo;
        const struct tcphdr *th;
        struct tcphdr *thtail;
@@ -1786,7 +1785,7 @@ no_coalesce:
         * to reduce memory overhead, so add a little headroom here.
         * Few sockets backlog are possibly concurrently non empty.
         */
-       limit += 64*1024;
+       limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024;
 
        if (unlikely(sk_add_backlog(sk, skb, limit))) {
                bh_unlock_sock(sk);
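
The change follows a simple pattern: loads that only matter on the slow
(non-coalesce) path are deferred until that path is actually taken, so the
coalesce fast path never touches sk_rcvbuf/sk_sndbuf. Below is a minimal,
self-contained userspace C sketch of that pattern. It is not the kernel
code: struct fake_sock, try_coalesce() and add_backlog_sketch() are made-up
names for illustration, and the plain loads stand in for the kernel's
READ_ONCE() annotations.

/* Sketch only: illustrates deferring unneeded loads past an early-exit path. */
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	unsigned int sk_rcvbuf;
	unsigned int sk_sndbuf;
	unsigned int backlog_len;
};

/* Stand-in for skb coalescing: pretend small packets always coalesce. */
static bool try_coalesce(unsigned int pkt_len)
{
	return pkt_len <= 64;
}

static bool add_backlog_sketch(struct fake_sock *sk, unsigned int pkt_len)
{
	unsigned int limit;	/* no longer initialized up front */

	if (try_coalesce(pkt_len)) {
		/* Fast path: sk_rcvbuf/sk_sndbuf were never read. */
		return true;
	}

	/*
	 * Slow path only: compute the limit here, mirroring
	 * "limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024;"
	 * from the diff above (the 64 KiB headroom covers per-skb overhead).
	 */
	limit = sk->sk_rcvbuf + sk->sk_sndbuf + 64 * 1024;

	if (sk->backlog_len + pkt_len > limit)
		return false;	/* would be dropped */

	sk->backlog_len += pkt_len;
	return true;
}

int main(void)
{
	struct fake_sock sk = { .sk_rcvbuf = 131072, .sk_sndbuf = 131072 };

	printf("small pkt accepted: %d\n", add_backlog_sketch(&sk, 40));
	printf("large pkt accepted: %d\n", add_backlog_sketch(&sk, 1500));
	return 0;
}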