Fixes for 5.11
author     Sasha Levin <sashal@kernel.org>
           Fri, 12 Mar 2021 23:04:38 +0000 (18:04 -0500)
committer  Sasha Levin <sashal@kernel.org>
           Fri, 12 Mar 2021 23:04:38 +0000 (18:04 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.11/mips-kernel-reserve-exception-base-early-to-prevent-.patch [new file with mode: 0644]
queue-5.11/mptcp-always-graft-subflow-socket-to-parent.patch [new file with mode: 0644]
queue-5.11/mptcp-reset-last_snd-on-subflow-close.patch [new file with mode: 0644]
queue-5.11/series

diff --git a/queue-5.11/mips-kernel-reserve-exception-base-early-to-prevent-.patch b/queue-5.11/mips-kernel-reserve-exception-base-early-to-prevent-.patch
new file mode 100644 (file)
index 0000000..9dbf15f
--- /dev/null
@@ -0,0 +1,163 @@
+From 284c649c495a821d715181e488aa187ea687229b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Mar 2021 10:24:47 +0100
+Subject: MIPS: kernel: Reserve exception base early to prevent corruption
+
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+[ Upstream commit bd67b711bfaa02cf19e88aa2d9edae5c1c1d2739 ]
+
+BMIPS is one of the few platforms that do change the exception base.
+After commit 2dcb39645441 ("memblock: do not start bottom-up allocations
+with kernel_end") we started seeing BMIPS boards fail to boot with the
+built-in FDT being corrupted.
+
+Before the cited commit, early allocations would be in the [kernel_end,
+RAM_END] range, but after the commit they would be within [RAM_START +
+PAGE_SIZE, RAM_END].
+
+The custom exception base handler installed by bmips_ebase_setup() for
+BMIPS5000 CPUs ends up trampling on the memory region allocated by
+unflatten_and_copy_device_tree(), thus corrupting the FDT used by the
+kernel.
+
+To fix this, we need to perform an early reservation of the custom
+exception space. Additionally, we reserve the first 4k (1k for R3k) for
+either the normal exception vector space (legacy CPUs) or special vectors
+like cache exceptions.
+
+Huge thanks to Serge for analysing and proposing a solution to this
+issue.
+
+Fixes: 2dcb39645441 ("memblock: do not start bottom-up allocations with kernel_end")
+Reported-by: Kamal Dasu <kdasu.kdev@gmail.com>
+Debugged-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
+Acked-by: Mike Rapoport <rppt@linux.ibm.com>
+Tested-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Serge Semin <fancer.lancer@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/traps.h    |  3 +++
+ arch/mips/kernel/cpu-probe.c     |  6 ++++++
+ arch/mips/kernel/cpu-r3k-probe.c |  3 +++
+ arch/mips/kernel/traps.c         | 10 +++++-----
+ 4 files changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
+index 6a0864bb604d..9038b91e2d8c 100644
+--- a/arch/mips/include/asm/traps.h
++++ b/arch/mips/include/asm/traps.h
+@@ -24,6 +24,9 @@ extern void (*board_ebase_setup)(void);
+ extern void (*board_cache_error_setup)(void);
+ extern int register_nmi_notifier(struct notifier_block *nb);
++extern void reserve_exception_space(phys_addr_t addr, unsigned long size);
++
++#define VECTORSPACING 0x100   /* for EI/VI mode */
+ #define nmi_notifier(fn, pri)                                         \
+ ({                                                                    \
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 31cb9199197c..21794db53c05 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -26,6 +26,7 @@
+ #include <asm/elf.h>
+ #include <asm/pgtable-bits.h>
+ #include <asm/spram.h>
++#include <asm/traps.h>
+ #include <linux/uaccess.h>
+ #include "fpu-probe.h"
+@@ -1619,6 +1620,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
+               c->cputype = CPU_BMIPS3300;
+               __cpu_name[cpu] = "Broadcom BMIPS3300";
+               set_elf_platform(cpu, "bmips3300");
++              reserve_exception_space(0x400, VECTORSPACING * 64);
+               break;
+       case PRID_IMP_BMIPS43XX: {
+               int rev = c->processor_id & PRID_REV_MASK;
+@@ -1629,6 +1631,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
+                       __cpu_name[cpu] = "Broadcom BMIPS4380";
+                       set_elf_platform(cpu, "bmips4380");
+                       c->options |= MIPS_CPU_RIXI;
++                      reserve_exception_space(0x400, VECTORSPACING * 64);
+               } else {
+                       c->cputype = CPU_BMIPS4350;
+                       __cpu_name[cpu] = "Broadcom BMIPS4350";
+@@ -1645,6 +1648,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
+                       __cpu_name[cpu] = "Broadcom BMIPS5000";
+               set_elf_platform(cpu, "bmips5000");
+               c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
++              reserve_exception_space(0x1000, VECTORSPACING * 64);
+               break;
+       }
+ }
+@@ -2124,6 +2128,8 @@ void cpu_probe(void)
+       if (cpu == 0)
+               __ua_limit = ~((1ull << cpu_vmbits) - 1);
+ #endif
++
++      reserve_exception_space(0, 0x1000);
+ }
+ void cpu_report(void)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index abdbbe8c5a43..af654771918c 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -21,6 +21,7 @@
+ #include <asm/fpu.h>
+ #include <asm/mipsregs.h>
+ #include <asm/elf.h>
++#include <asm/traps.h>
+ #include "fpu-probe.h"
+@@ -158,6 +159,8 @@ void cpu_probe(void)
+               cpu_set_fpu_opts(c);
+       else
+               cpu_set_nofpu_opts(c);
++
++      reserve_exception_space(0, 0x400);
+ }
+ void cpu_report(void)
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index e0352958e2f7..808b8b61ded1 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -2009,13 +2009,16 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs)
+       nmi_exit();
+ }
+-#define VECTORSPACING 0x100   /* for EI/VI mode */
+-
+ unsigned long ebase;
+ EXPORT_SYMBOL_GPL(ebase);
+ unsigned long exception_handlers[32];
+ unsigned long vi_handlers[64];
++void reserve_exception_space(phys_addr_t addr, unsigned long size)
++{
++      memblock_reserve(addr, size);
++}
++
+ void __init *set_except_vector(int n, void *addr)
+ {
+       unsigned long handler = (unsigned long) addr;
+@@ -2367,10 +2370,7 @@ void __init trap_init(void)
+       if (!cpu_has_mips_r2_r6) {
+               ebase = CAC_BASE;
+-              ebase_pa = virt_to_phys((void *)ebase);
+               vec_size = 0x400;
+-
+-              memblock_reserve(ebase_pa, vec_size);
+       } else {
+               if (cpu_has_veic || cpu_has_vint)
+                       vec_size = 0x200 + VECTORSPACING*64;
+-- 
+2.30.1
+
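The core idea of the patch above is that any physical range the CPU will later
use for exception vectors has to be marked reserved in memblock before the
first early allocation (such as the unflattened FDT copy) can land there. A
minimal sketch of that idea follows; the helper name and the EXAMPLE_*
constants are invented for illustration, and only memblock_reserve() itself is
the real kernel interface.

#include <linux/init.h>
#include <linux/memblock.h>

#define EXAMPLE_LEGACY_VEC_BASE	0x0	/* traditional exception vector base */
#define EXAMPLE_LEGACY_VEC_SIZE	0x1000	/* 4k; 0x400 (1k) on R3k-class CPUs */

static void __init example_reserve_vectors(phys_addr_t custom_base,
					   unsigned long custom_size)
{
	/* Keep early bottom-up allocations out of the normal vector space. */
	memblock_reserve(EXAMPLE_LEGACY_VEC_BASE, EXAMPLE_LEGACY_VEC_SIZE);

	/* Also protect a platform-specific relocated exception base, if any. */
	if (custom_size)
		memblock_reserve(custom_base, custom_size);
}
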
diff --git a/queue-5.11/mptcp-always-graft-subflow-socket-to-parent.patch b/queue-5.11/mptcp-always-graft-subflow-socket-to-parent.patch
new file mode 100644 (file)
index 0000000..6728aef
--- /dev/null
@@ -0,0 +1,152 @@
+From 7a0642a351326d1f0a2d9fd3b1a861dfb9e6aa81 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Jan 2021 15:39:10 +0100
+Subject: mptcp: always graft subflow socket to parent
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 866f26f2a9c33bc70eb0f07ffc37fd9424ffe501 ]
+
+Currently, incoming subflows link to the parent socket,
+while outgoing ones link to a per-subflow socket. The latter
+is not really needed, except at the initial connect() time and
+for the first subflow.
+
+Always graft the outgoing subflow to the parent socket and
+free the unneeded ones early.
+
+This allows some code cleanup, reduces the amount of memory
+used, and will simplify the next patch.
+
+Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.c | 36 ++++++++++--------------------------
+ net/mptcp/protocol.h |  1 +
+ net/mptcp/subflow.c  |  3 +++
+ 3 files changed, 14 insertions(+), 26 deletions(-)
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index b51872b9dd61..3cc7be259396 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -114,11 +114,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
+       list_add(&subflow->node, &msk->conn_list);
+       sock_hold(ssock->sk);
+       subflow->request_mptcp = 1;
+-
+-      /* accept() will wait on first subflow sk_wq, and we always wakes up
+-       * via msk->sk_socket
+-       */
+-      RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
++      mptcp_sock_graft(msk->first, sk->sk_socket);
+       return 0;
+ }
+@@ -2114,9 +2110,6 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
+ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+                      struct mptcp_subflow_context *subflow)
+ {
+-      bool dispose_socket = false;
+-      struct socket *sock;
+-
+       list_del(&subflow->node);
+       lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+@@ -2124,11 +2117,8 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+       /* if we are invoked by the msk cleanup code, the subflow is
+        * already orphaned
+        */
+-      sock = ssk->sk_socket;
+-      if (sock) {
+-              dispose_socket = sock != sk->sk_socket;
++      if (ssk->sk_socket)
+               sock_orphan(ssk);
+-      }
+       subflow->disposable = 1;
+@@ -2146,8 +2136,6 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+               __sock_put(ssk);
+       }
+       release_sock(ssk);
+-      if (dispose_socket)
+-              iput(SOCK_INODE(sock));
+       sock_put(ssk);
+ }
+@@ -2535,6 +2523,12 @@ static void __mptcp_destroy_sock(struct sock *sk)
+       pr_debug("msk=%p", msk);
++      /* dispose the ancillatory tcp socket, if any */
++      if (msk->subflow) {
++              iput(SOCK_INODE(msk->subflow));
++              msk->subflow = NULL;
++      }
++
+       /* be sure to always acquire the join list lock, to sync vs
+        * mptcp_finish_join().
+        */
+@@ -2585,20 +2579,10 @@ cleanup:
+       inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
+       list_for_each_entry(subflow, &mptcp_sk(sk)->conn_list, node) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-              bool slow, dispose_socket;
+-              struct socket *sock;
++              bool slow = lock_sock_fast(ssk);
+-              slow = lock_sock_fast(ssk);
+-              sock = ssk->sk_socket;
+-              dispose_socket = sock && sock != sk->sk_socket;
+               sock_orphan(ssk);
+               unlock_sock_fast(ssk, slow);
+-
+-              /* for the outgoing subflows we additionally need to free
+-               * the associated socket
+-               */
+-              if (dispose_socket)
+-                      iput(SOCK_INODE(sock));
+       }
+       sock_orphan(sk);
+@@ -3040,7 +3024,7 @@ void mptcp_finish_connect(struct sock *ssk)
+       mptcp_rcv_space_init(msk, ssk);
+ }
+-static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
++void mptcp_sock_graft(struct sock *sk, struct socket *parent)
+ {
+       write_lock_bh(&sk->sk_callback_lock);
+       rcu_assign_pointer(sk->sk_wq, &parent->wq);
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index dbf62e74fcc1..18fef4273bdc 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -460,6 +460,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
+ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+                      struct mptcp_subflow_context *subflow);
+ void mptcp_subflow_reset(struct sock *ssk);
++void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ /* called with sk socket lock held */
+ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 9d28f6e3dc49..81b7be67d288 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1165,6 +1165,9 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+       if (err && err != -EINPROGRESS)
+               goto failed_unlink;
++      /* discard the subflow socket */
++      mptcp_sock_graft(ssk, sk->sk_socket);
++      iput(SOCK_INODE(sf));
+       return err;
+ failed_unlink:
+-- 
+2.30.1
+
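For context on what "grafting" means in the patch above: the subflow's TCP
socket is pointed at the parent MPTCP socket's wait queue, so readiness
wakeups on the subflow reach whoever is sleeping in accept()/poll() on the
MPTCP-level socket. Below is a trimmed sketch of that operation, mirroring the
mptcp_sock_graft() hunk; the function name is invented here, and the real
helper also transfers ownership details not shown in the hunk.

#include <net/sock.h>

static void example_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	/* Wakeups on sk (the TCP subflow) now go to the parent's wait queue. */
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	write_unlock_bh(&sk->sk_callback_lock);
}
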
diff --git a/queue-5.11/mptcp-reset-last_snd-on-subflow-close.patch b/queue-5.11/mptcp-reset-last_snd-on-subflow-close.patch
new file mode 100644 (file)
index 0000000..ec83ec7
--- /dev/null
@@ -0,0 +1,51 @@
+From d04fcd890780596c9ccc4949140052e6f27c5cf2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Mar 2021 13:32:08 -0800
+Subject: mptcp: reset last_snd on subflow close
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit e0be4931f3fee2e04dec4013ea4f27ec2db8556f ]
+
+The send logic caches the last active subflow in the msk, so the cache
+needs to be cleared when that subflow is closed.
+
+Fixes: d5f49190def61c ("mptcp: allow picking different xmit subflows")
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/155
+Reported-by: Christoph Paasch <cpaasch@apple.com>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 3cc7be259396..de89824a2a36 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2110,6 +2110,8 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
+ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+                      struct mptcp_subflow_context *subflow)
+ {
++      struct mptcp_sock *msk = mptcp_sk(sk);
++
+       list_del(&subflow->node);
+       lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+@@ -2138,6 +2140,9 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+       release_sock(ssk);
+       sock_put(ssk);
++
++      if (ssk == msk->last_snd)
++              msk->last_snd = NULL;
+ }
+ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
+-- 
+2.30.1
+
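The fix above is an instance of a general pattern: a cached "last used"
pointer must be dropped when the object it refers to is torn down, so the next
user re-selects a live object instead of following a stale pointer. A
self-contained sketch of the same pattern with invented types (not kernel
code; the kernel equivalent is clearing msk->last_snd in __mptcp_close_ssk()
as in the hunk above):

#include <stddef.h>

struct subflow;				/* opaque transport handle */

struct connection {
	struct subflow *last_snd;	/* cached "last used" subflow, may be NULL */
};

static void close_subflow(struct connection *conn, struct subflow *sf)
{
	/* ... tear down sf's resources here ... */

	/* Drop the cache if it still points at the subflow being closed. */
	if (conn->last_snd == sf)
		conn->last_snd = NULL;
}
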
diff --git a/queue-5.11/series b/queue-5.11/series
index 09fcb1523f450fd434fbcc7eedbc78df84eb7a6d..e8acf40f9547480351e6a9fe0842096803859e65 100644 (file)
@@ -119,3 +119,6 @@ media-rkisp1-params-fix-wrong-bits-settings.patch
 media-v4l-vsp1-fix-uif-null-pointer-access.patch
 media-v4l-vsp1-fix-bru-null-pointer-access.patch
 media-rc-compile-rc-cec.c-into-rc-core.patch
+mips-kernel-reserve-exception-base-early-to-prevent-.patch
+mptcp-always-graft-subflow-socket-to-parent.patch
+mptcp-reset-last_snd-on-subflow-close.patch