git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 26 Mar 2019 01:23:32 +0000 (10:23 +0900)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 26 Mar 2019 01:23:32 +0000 (10:23 +0900)
added patches:
serial-sprd-adjust-timeout-to-a-big-value.patch
tcp-dccp-drop-syn-packets-if-accept-queue-is-full.patch

queue-4.9/serial-sprd-adjust-timeout-to-a-big-value.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/tcp-dccp-drop-syn-packets-if-accept-queue-is-full.patch [new file with mode: 0644]

diff --git a/queue-4.9/serial-sprd-adjust-timeout-to-a-big-value.patch b/queue-4.9/serial-sprd-adjust-timeout-to-a-big-value.patch
new file mode 100644 (file)
index 0000000..03d00a5
--- /dev/null
@@ -0,0 +1,33 @@
+From e1dc9b08051a2c2e694edf48d1e704f07c7c143c Mon Sep 17 00:00:00 2001
+From: Wei Qiao <wei.qiao@spreadtrum.com>
+Date: Mon, 27 Mar 2017 14:06:42 +0800
+Subject: serial: sprd: adjust TIMEOUT to a big value
+
+From: Wei Qiao <wei.qiao@spreadtrum.com>
+
+commit e1dc9b08051a2c2e694edf48d1e704f07c7c143c upstream.
+
+SPRD_TIMEOUT was 256, which is too small to wait until the status
+switched to workable in a while loop, so that the earlycon could
+not work correctly.
+
+Signed-off-by: Wei Qiao <wei.qiao@spreadtrum.com>
+Signed-off-by: Chunyan Zhang <chunyan.zhang@spreadtrum.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/sprd_serial.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/sprd_serial.c
++++ b/drivers/tty/serial/sprd_serial.c
+@@ -36,7 +36,7 @@
+ #define SPRD_FIFO_SIZE                128
+ #define SPRD_DEF_RATE         26000000
+ #define SPRD_BAUD_IO_LIMIT    3000000
+-#define SPRD_TIMEOUT          256
++#define SPRD_TIMEOUT          256000
+ /* the offset of serial registers and BITs for them */
+ /* data registers */
index f902f1d32237724dd801c5166f5b20f9ad1c637f..39c4ee276f3e29457dff8e73320fa9e4a27bdb94 100644 (file)
@@ -16,3 +16,5 @@ bluetooth-fix-decrementing-reference-count-twice-in-releasing-socket.patch
 locking-lockdep-add-debug_locks-check-in-__lock_downgrade.patch
 alsa-hda-record-the-current-power-state-before-suspend-resume-calls.patch
 alsa-hda-enforces-runtime_resume-after-s3-and-s4-for-each-codec.patch
+tcp-dccp-drop-syn-packets-if-accept-queue-is-full.patch
+serial-sprd-adjust-timeout-to-a-big-value.patch
diff --git a/queue-4.9/tcp-dccp-drop-syn-packets-if-accept-queue-is-full.patch b/queue-4.9/tcp-dccp-drop-syn-packets-if-accept-queue-is-full.patch
new file mode 100644 (file)
index 0000000..1bf1f53
--- /dev/null
@@ -0,0 +1,91 @@
+From 5ea8ea2cb7f1d0db15762c9b0bb9e7330425a071 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 26 Oct 2016 09:27:57 -0700
+Subject: tcp/dccp: drop SYN packets if accept queue is full
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 5ea8ea2cb7f1d0db15762c9b0bb9e7330425a071 upstream.
+
+Per listen(fd, backlog) rules, there is really no point accepting a SYN,
+sending a SYNACK, and dropping the following ACK packet if accept queue
+is full, because application is not draining accept queue fast enough.
+
+This behavior is fooling TCP clients that believe they established a
+flow, while there is nothing at server side. They might then send about
+10 MSS (if using IW10) that will be dropped anyway while server is under
+stress.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/inet_connection_sock.h |    5 -----
+ net/dccp/ipv4.c                    |    8 +-------
+ net/dccp/ipv6.c                    |    2 +-
+ net/ipv4/tcp_input.c               |    8 +-------
+ 4 files changed, 3 insertions(+), 20 deletions(-)
+
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -289,11 +289,6 @@ static inline int inet_csk_reqsk_queue_l
+       return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
+ }
+-static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
+-{
+-      return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
+-}
+-
+ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
+ {
+       return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -596,13 +596,7 @@ int dccp_v4_conn_request(struct sock *sk
+       if (inet_csk_reqsk_queue_is_full(sk))
+               goto drop;
+-      /*
+-       * Accept backlog is full. If we have already queued enough
+-       * of warm entries in syn queue, drop request. It is better than
+-       * clogging syn queue with openreqs with exponentially increasing
+-       * timeout.
+-       */
+-      if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
++      if (sk_acceptq_is_full(sk))
+               goto drop;
+       req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -328,7 +328,7 @@ static int dccp_v6_conn_request(struct s
+       if (inet_csk_reqsk_queue_is_full(sk))
+               goto drop;
+-      if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
++      if (sk_acceptq_is_full(sk))
+               goto drop;
+       req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6374,13 +6374,7 @@ int tcp_conn_request(struct request_sock
+                       goto drop;
+       }
+-
+-      /* Accept backlog is full. If we have already queued enough
+-       * of warm entries in syn queue, drop request. It is better than
+-       * clogging syn queue with openreqs with exponentially increasing
+-       * timeout.
+-       */
+-      if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
++      if (sk_acceptq_is_full(sk)) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+               goto drop;
+       }