tcp: fix possible freeze in tx path under memory pressure
author     Eric Dumazet <edumazet@google.com>
           Tue, 14 Jun 2022 17:17:34 +0000 (10:17 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 17 Aug 2022 12:40:48 +0000 (14:40 +0200)
[ Upstream commit 849b425cd091e1804af964b771761cfbefbafb43 ]

The blamed commit only dealt with applications issuing small writes.

The issue here is that we allow forcing a memory schedule for the
sk_buff allocation, but we have no guarantee that sendmsg() is able
to copy any payload into it.

In this patch, I make sure the socket can use up to tcp_wmem[0] bytes.

For example, if we consider tcp_wmem[0] = 4096 (the default on x86)
and an initial skb->truesize of 1280, tcp_sendmsg() is able to copy
up to 2816 bytes (4096 - 1280) under memory pressure.

Before this patch, a sendmsg() sending more than 2816 bytes would
either block forever (under persistent memory pressure) or return
-EAGAIN.

For networks with bigger MTUs, it is advised to increase tcp_wmem[0]
to avoid sending packets that are too small.
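
To make the arithmetic above concrete, here is a minimal userspace
sketch (not kernel code; model_wmem_schedule() is a hypothetical
stand-in for the fallback in tcp_wmem_schedule() shown in the diff
below):

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* Models the fallback in tcp_wmem_schedule(): when the normal
     * sk_wmem_schedule() fails under memory pressure, grant whatever
     * budget is left below tcp_wmem[0].
     */
    static int model_wmem_schedule(int tcp_wmem0, int wmem_queued, int copy)
    {
        int left = tcp_wmem0 - wmem_queued;

        return left > 0 ? min_int(copy, left) : 0;
    }

    int main(void)
    {
        int tcp_wmem0 = 4096;   /* default tcp_wmem[0] on x86 */
        int queued = 1280;      /* initial skb->truesize */
        int requested = 8192;   /* sendmsg() wants to copy this much */

        /* Prints 2816: the socket still makes progress under pressure. */
        printf("guaranteed copy: %d bytes\n",
               model_wmem_schedule(tcp_wmem0, queued, requested));
        return 0;
    }

With these numbers, a sendmsg() of more than 2816 bytes proceeds in
2816-byte (or smaller) chunks instead of blocking or failing.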

v2: deal with zero copy paths.

Fixes: 8e4d980ac215 ("tcp: fix behavior for epoll edge trigger")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Wei Wang <weiwan@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 91735d631a282d048554b53db65c5e9cecea4294..51116166e3d21b1bef3c419de8d250695a952490 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -953,6 +953,23 @@ static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
        return 0;
 }
 
+static int tcp_wmem_schedule(struct sock *sk, int copy)
+{
+       int left;
+
+       if (likely(sk_wmem_schedule(sk, copy)))
+               return copy;
+
+       /* We could be in trouble if we have nothing queued.
+        * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
+        * to guarantee some progress.
+        */
+       left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
+       if (left > 0)
+               sk_forced_mem_schedule(sk, min(left, copy));
+       return min(copy, sk->sk_forward_alloc);
+}
+
 static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
                                      struct page *page, int offset, size_t *size)
 {
@@ -988,7 +1005,11 @@ new_segment:
                tcp_mark_push(tp, skb);
                goto new_segment;
        }
-       if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
+       if (tcp_downgrade_zcopy_pure(sk, skb))
+               return NULL;
+
+       copy = tcp_wmem_schedule(sk, copy);
+       if (!copy)
                return NULL;
 
        if (can_coalesce) {
@@ -1337,8 +1358,11 @@ new_segment:
 
                        copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
-                       if (tcp_downgrade_zcopy_pure(sk, skb) ||
-                           !sk_wmem_schedule(sk, copy))
+                       if (tcp_downgrade_zcopy_pure(sk, skb))
+                               goto wait_for_space;
+
+                       copy = tcp_wmem_schedule(sk, copy);
+                       if (!copy)
                                goto wait_for_space;
 
                        err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
@@ -1365,7 +1389,8 @@ new_segment:
                                skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
 
                        if (!skb_zcopy_pure(skb)) {
-                               if (!sk_wmem_schedule(sk, copy))
+                               copy = tcp_wmem_schedule(sk, copy);
+                               if (!copy)
                                        goto wait_for_space;
                        }