--- /dev/null
+From d2de60c3fc508440269d005a53c8f3b450d600bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2020 18:03:23 +0200
+Subject: bpf: Fix tnum constraints for 32-bit comparisons
+
+From: Jann Horn <jannh@google.com>
+
+[ Upstream commit 604dca5e3af1db98bd123b7bfc02b017af99e3a0 ]
+
+The BPF verifier tried to track values based on 32-bit comparisons by
+(ab)using the tnum state via 581738a681b6 ("bpf: Provide better register
+bounds after jmp32 instructions"). The idea is that after a check like
+this:
+
+ if ((u32)r0 > 3)
+ exit
+
+We can't meaningfully constrain the arithmetic-range-based tracking, but
+we can update the tnum state to (value=0,mask=0xffff'ffff'0000'0003).
+However, the implementation from 581738a681b6 didn't compute the tnum
+constraint based on the fixed operand, but instead derived it from the
+arithmetic-range-based tracking. This means that after the following
+sequence of operations:
+
+ if (r0 >= 0x1'0000'0001)
+ exit
+ if ((u32)r0 > 7)
+ exit
+
+The verifier assumed that the lower half of r0 was in the range (0, 0)
+and applied the tnum constraint (value=0,mask=0xffff'ffff'0000'0000),
+thus causing the overall tnum to be (value=0,mask=0x1'0000'0000), which
+was incorrect. Provide a fixed implementation.
+
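+For illustration, here is a minimal userspace sketch of the constraint
+the fixed code derives for the false branch of `if ((u32)r0 > 7)`. The
+tnum_range() body is transcribed from kernel/bpf/tnum.c; fls64() is a
+local stand-in for the kernel helper:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  struct tnum { uint64_t value; uint64_t mask; };
+
+  /* stand-in for the kernel's fls64(): 1-based index of the highest
+   * set bit, 0 if none
+   */
+  static int fls64(uint64_t x)
+  {
+      return x ? 64 - __builtin_clzll(x) : 0;
+  }
+
+  /* transcribed from kernel/bpf/tnum.c: tightest tnum covering
+   * [min, max]
+   */
+  static struct tnum tnum_range(uint64_t min, uint64_t max)
+  {
+      uint64_t chi = min ^ max, delta;
+      int bits = fls64(chi);
+
+      if (bits > 63)
+          return (struct tnum){ 0, ~0ULL };
+      delta = (1ULL << bits) - 1;
+      return (struct tnum){ min & ~delta, delta };
+  }
+
+  int main(void)
+  {
+      /* false branch of `if ((u32)r0 > 7)`: low 32 bits in [0, 7] */
+      struct tnum t = tnum_range(0, 7);
+
+      t.mask |= ~0xffffffffULL;   /* upper half stays unknown */
+      printf("value=%#llx mask=%#llx\n",
+             (unsigned long long)t.value,
+             (unsigned long long)t.mask);
+      /* prints: value=0 mask=0xffffffff00000007 */
+      return 0;
+  }
+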
+Fixes: 581738a681b6 ("bpf: Provide better register bounds after jmp32 instructions")
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20200330160324.15259-3-daniel@iogearbox.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 108 ++++++++++++++++++++++++++++--------------
+ 1 file changed, 72 insertions(+), 36 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 5080469094afe..595b39eee6422 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5590,6 +5590,70 @@ static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
+ reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
+ }
+
++/* Constrain the possible values of @reg with unsigned upper bound @bound.
++ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive.
++ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits
++ * of @reg.
++ */
++static void set_upper_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32,
++ bool is_exclusive)
++{
++ if (is_exclusive) {
++ /* There are no values for `reg` that make `reg<0` true. */
++ if (bound == 0)
++ return;
++ bound--;
++ }
++ if (is_jmp32) {
++ /* Constrain the register's value in the tnum representation.
++ * For 64-bit comparisons this happens later in
++ * __reg_bound_offset(), but for 32-bit comparisons, we can be
++ * more precise than what can be derived from the updated
++ * numeric bounds.
++ */
++ struct tnum t = tnum_range(0, bound);
++
++ t.mask |= ~0xffffffffULL; /* upper half is unknown */
++ reg->var_off = tnum_intersect(reg->var_off, t);
++
++ /* Compute the 64-bit bound from the 32-bit bound. */
++ bound += gen_hi_max(reg->var_off);
++ }
++ reg->umax_value = min(reg->umax_value, bound);
++}
++
++/* Constrain the possible values of @reg with unsigned lower bound @bound.
++ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive.
++ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits
++ * of @reg.
++ */
++static void set_lower_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32,
++ bool is_exclusive)
++{
++ if (is_exclusive) {
++ /* There are no values for `reg` that make `reg>MAX` true. */
++ if (bound == (is_jmp32 ? U32_MAX : U64_MAX))
++ return;
++ bound++;
++ }
++ if (is_jmp32) {
++ /* Constrain the register's value in the tnum representation.
++ * For 64-bit comparisons this happens later in
++ * __reg_bound_offset(), but for 32-bit comparisons, we can be
++ * more precise than what can be derived from the updated
++ * numeric bounds.
++ */
++ struct tnum t = tnum_range(bound, U32_MAX);
++
++ t.mask |= ~0xffffffffULL; /* upper half is unknown */
++ reg->var_off = tnum_intersect(reg->var_off, t);
++
++ /* Compute the 64-bit bound from the 32-bit bound. */
++ bound += gen_hi_min(reg->var_off);
++ }
++ reg->umin_value = max(reg->umin_value, bound);
++}
++
+ /* Adjusts the register min/max values in the case that the dst_reg is the
+ * variable register that we are working on, and src_reg is a constant or we're
+ * simply doing a BPF_K check.
+@@ -5645,15 +5709,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
+ case BPF_JGE:
+ case BPF_JGT:
+ {
+- u64 false_umax = opcode == BPF_JGT ? val : val - 1;
+- u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
+-
+- if (is_jmp32) {
+- false_umax += gen_hi_max(false_reg->var_off);
+- true_umin += gen_hi_min(true_reg->var_off);
+- }
+- false_reg->umax_value = min(false_reg->umax_value, false_umax);
+- true_reg->umin_value = max(true_reg->umin_value, true_umin);
++ set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JGE);
++ set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JGT);
+ break;
+ }
+ case BPF_JSGE:
+@@ -5674,15 +5731,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
+ case BPF_JLE:
+ case BPF_JLT:
+ {
+- u64 false_umin = opcode == BPF_JLT ? val : val + 1;
+- u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
+-
+- if (is_jmp32) {
+- false_umin += gen_hi_min(false_reg->var_off);
+- true_umax += gen_hi_max(true_reg->var_off);
+- }
+- false_reg->umin_value = max(false_reg->umin_value, false_umin);
+- true_reg->umax_value = min(true_reg->umax_value, true_umax);
++ set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JLE);
++ set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JLT);
+ break;
+ }
+ case BPF_JSLE:
+@@ -5757,15 +5807,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
+ case BPF_JGE:
+ case BPF_JGT:
+ {
+- u64 false_umin = opcode == BPF_JGT ? val : val + 1;
+- u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
+-
+- if (is_jmp32) {
+- false_umin += gen_hi_min(false_reg->var_off);
+- true_umax += gen_hi_max(true_reg->var_off);
+- }
+- false_reg->umin_value = max(false_reg->umin_value, false_umin);
+- true_reg->umax_value = min(true_reg->umax_value, true_umax);
++ set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JGE);
++ set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JGT);
+ break;
+ }
+ case BPF_JSGE:
+@@ -5783,15 +5826,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
+ case BPF_JLE:
+ case BPF_JLT:
+ {
+- u64 false_umax = opcode == BPF_JLT ? val : val - 1;
+- u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
+-
+- if (is_jmp32) {
+- false_umax += gen_hi_max(false_reg->var_off);
+- true_umin += gen_hi_min(true_reg->var_off);
+- }
+- false_reg->umax_value = min(false_reg->umax_value, false_umax);
+- true_reg->umin_value = max(true_reg->umin_value, true_umin);
++ set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JLE);
++ set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JLT);
+ break;
+ }
+ case BPF_JSLE:
+--
+2.20.1
+
--- /dev/null
+From 0302c93b77e493fc1e8e6673a1c75fcb91e150c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jan 2020 14:14:57 -0800
+Subject: brcmfmac: abort and release host after error
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 863844ee3bd38219c88e82966d1df36a77716f3e ]
+
+With commit 216b44000ada ("brcmfmac: Fix use after free in
+brcmf_sdio_readframes()") applied, we see locking timeouts in
+brcmf_sdio_watchdog_thread().
+
+brcmfmac: brcmf_escan_timeout: timer expired
+INFO: task brcmf_wdog/mmc1:621 blocked for more than 120 seconds.
+Not tainted 4.19.94-07984-g24ff99a0f713 #1
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+brcmf_wdog/mmc1 D 0 621 2 0x00000000 last_sleep: 2440793077. last_runnable: 2440766827
+[<c0aa1e60>] (__schedule) from [<c0aa2100>] (schedule+0x98/0xc4)
+[<c0aa2100>] (schedule) from [<c0853830>] (__mmc_claim_host+0x154/0x274)
+[<c0853830>] (__mmc_claim_host) from [<bf10c5b8>] (brcmf_sdio_watchdog_thread+0x1b0/0x1f8 [brcmfmac])
+[<bf10c5b8>] (brcmf_sdio_watchdog_thread [brcmfmac]) from [<c02570b8>] (kthread+0x178/0x180)
+
+In addition to restarting or exiting the loop, it is also necessary to
+abort the command and to release the host.
+
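+The invariant is the usual one for claim/release pairs: every early exit
+from the claimed region must release first. A rough userspace analogue
+of the bug class, with a pthread mutex standing in for the mmc host
+claim (hypothetical names, not the driver's actual loop):
+
+  #include <pthread.h>
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static pthread_mutex_t host = PTHREAD_MUTEX_INITIALIZER;
+
+  /* analogue of brcmf_sdio_readframes(): holds the "host" across the
+   * per-frame work
+   */
+  static void read_frames(int nframes)
+  {
+      for (int i = 0; i < nframes; i++) {
+          pthread_mutex_lock(&host);       /* sdio_claim_host() */
+          bool hdr_bad = (i == 1);         /* simulated hdparse failure */
+          if (hdr_bad) {
+              /* 216b44000ada continued here while still holding the
+               * host; the fix aborts the command and releases before
+               * looping:
+               */
+              pthread_mutex_unlock(&host); /* sdio_release_host() */
+              continue;
+          }
+          /* ...process the frame... */
+          pthread_mutex_unlock(&host);
+      }
+  }
+
+  int main(void)
+  {
+      read_frames(3);
+      pthread_mutex_lock(&host);           /* watchdog-thread analogue:  */
+      puts("watchdog claimed host");       /* blocks forever without the */
+      pthread_mutex_unlock(&host);         /* error-path unlock above    */
+      return 0;
+  }
+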
+Fixes: 216b44000ada ("brcmfmac: Fix use after free in brcmf_sdio_readframes()")
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: Matthias Kaehlcke <mka@chromium.org>
+Cc: Brian Norris <briannorris@chromium.org>
+Cc: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Acked-by: franky.lin@broadcom.com
+Acked-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index f9047db6a11d8..3a08252f1a53f 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -1938,6 +1938,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
+ if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
+ BRCMF_SDIO_FT_NORMAL)) {
+ rd->len = 0;
++ brcmf_sdio_rxfail(bus, true, true);
++ sdio_release_host(bus->sdiodev->func1);
+ brcmu_pkt_buf_free_skb(pkt);
+ continue;
+ }
+--
+2.20.1
+
--- /dev/null
+From a2800de205c0ea502ec396f4eac984a004064098 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Feb 2020 13:11:00 -0500
+Subject: padata: fix uninitialized return value in padata_replace()
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+[ Upstream commit 41ccdbfd5427bbbf3ed58b16750113b38fad1780 ]
+
+According to Geert's report[0],
+
+ kernel/padata.c: warning: 'err' may be used uninitialized in this
+ function [-Wuninitialized]: => 539:2
+
+The warning is seen only with older compilers on certain archs. The
+runtime effect is potentially returning garbage down the stack when
+padata's cpumasks are modified before any pcrypt requests have run.
+
+The simplest fix is to initialize err to the success value.
+
+[0] http://lkml.kernel.org/r/20200210135506.11536-1-geert@linux-m68k.org
+
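+A minimal userspace reduction of the pattern (hypothetical names; in
+padata the loop body runs once per padata_shell, so an empty list
+leaves err unwritten):
+
+  #include <stdio.h>
+
+  struct shell { struct shell *next; };
+
+  static int replace_one(struct shell *ps)
+  {
+      (void)ps;
+      return 0;
+  }
+
+  static int replace_all(struct shell *head)
+  {
+      int err = 0;   /* the one-line fix; without " = 0", an empty
+                      * list leaves err unwritten and the function
+                      * returns stack garbage (-Wuninitialized)
+                      */
+
+      for (struct shell *ps = head; ps; ps = ps->next)
+          err = replace_one(ps);
+      return err;
+  }
+
+  int main(void)
+  {
+      printf("empty list: %d\n", replace_all(NULL));   /* prints 0 */
+      return 0;
+  }
+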
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Fixes: bbefa1dd6a6d ("crypto: pcrypt - Avoid deadlock by using per-instance padata queues")
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/padata.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 72777c10bb9cb..62082597d4a2a 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -512,7 +512,7 @@ static int padata_replace_one(struct padata_shell *ps)
+ static int padata_replace(struct padata_instance *pinst)
+ {
+ struct padata_shell *ps;
+- int err;
++ int err = 0;
+
+ pinst->flags |= PADATA_RESET;
+
+--
+2.20.1
+
net-macb-fix-handling-of-fixed-link-node.patch
net-fix-fraglist-segmentation-reference-count-leak.patch
udp-initialize-is_flist-with-0-in-udp_gro_receive.patch
+padata-fix-uninitialized-return-value-in-padata_repl.patch
+brcmfmac-abort-and-release-host-after-error.patch
+bpf-fix-tnum-constraints-for-32-bit-comparisons.patch
+xarray-fix-xa_find_next-for-large-multi-index-entrie.patch
--- /dev/null
+From e8f26c8ef4ac00ac8d1e7656f6c73bf5900a16a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 Jan 2020 05:07:55 -0500
+Subject: XArray: Fix xa_find_next for large multi-index entries
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit bd40b17ca49d7d110adf456e647701ce74de2241 ]
+
+Coverity pointed out that xas_sibling() was shifting xa_offset without
+promoting it to an unsigned long first, so the shift could cause an
+overflow and we'd get the wrong answer. The fix is obvious, and the
+new test case provokes UBSAN to report an error:
+runtime error: shift exponent 60 is too large for 32-bit type 'int'
+
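+The bug class reduces to integer promotion: xa_offset is a narrow field,
+so without the cast the shift happens in 32-bit int. A standalone sketch
+(hypothetical variable names; build with -fsanitize=undefined to see the
+same report):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      unsigned char offset = 1;   /* like xas->xa_offset */
+      unsigned int shift = 60;    /* like node->shift for a large
+                                   * multi-index entry
+                                   */
+
+      /* buggy: offset promotes only to int, so this is a 32-bit shift
+       * by 60 -- UBSAN reports "shift exponent 60 is too large for
+       * 32-bit type 'int'"
+       */
+      unsigned long bad = offset << shift;
+
+      /* fixed: widen to unsigned long first, as the patch does */
+      unsigned long good = (unsigned long)offset << shift;
+
+      printf("bad=%#lx good=%#lx\n", bad, good);
+      return 0;
+  }
+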
+Fixes: 19c30f4dd092 ("XArray: Fix xa_find_after with multi-index entries")
+Reported-by: Bjorn Helgaas <bhelgaas@google.com>
+Reported-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/test_xarray.c | 18 ++++++++++++++++++
+ lib/xarray.c | 3 ++-
+ 2 files changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/lib/test_xarray.c b/lib/test_xarray.c
+index 55c14e8c88591..8c7d7a8468b88 100644
+--- a/lib/test_xarray.c
++++ b/lib/test_xarray.c
+@@ -12,6 +12,9 @@
+ static unsigned int tests_run;
+ static unsigned int tests_passed;
+
++static const unsigned int order_limit =
++ IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
++
+ #ifndef XA_DEBUG
+ # ifdef __KERNEL__
+ void xa_dump(const struct xarray *xa) { }
+@@ -959,6 +962,20 @@ static noinline void check_multi_find_2(struct xarray *xa)
+ }
+ }
+
++static noinline void check_multi_find_3(struct xarray *xa)
++{
++ unsigned int order;
++
++ for (order = 5; order < order_limit; order++) {
++ unsigned long index = 1UL << (order - 5);
++
++ XA_BUG_ON(xa, !xa_empty(xa));
++ xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
++ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
++ xa_erase_index(xa, 0);
++ }
++}
++
+ static noinline void check_find_1(struct xarray *xa)
+ {
+ unsigned long i, j, k;
+@@ -1081,6 +1098,7 @@ static noinline void check_find(struct xarray *xa)
+ for (i = 2; i < 10; i++)
+ check_multi_find_1(xa, i);
+ check_multi_find_2(xa);
++ check_multi_find_3(xa);
+ }
+
+ /* See find_swap_entry() in mm/shmem.c */
+diff --git a/lib/xarray.c b/lib/xarray.c
+index 1d9fab7db8dad..acd1fad2e862a 100644
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -1839,7 +1839,8 @@ static bool xas_sibling(struct xa_state *xas)
+ if (!node)
+ return false;
+ mask = (XA_CHUNK_SIZE << node->shift) - 1;
+- return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
++ return (xas->xa_index & mask) >
++ ((unsigned long)xas->xa_offset << node->shift);
+ }
+
+ /**
+--
+2.20.1
+