--- /dev/null
+From 6fee2d43069003d5b44626a343ac931f6e45b8a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Nov 2020 17:27:57 +0000
+Subject: afs: Fix afs_write_end() when called with copied == 0 [ver #3]
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 3ad216ee73abc554ed8f13f4f8b70845a7bef6da ]
+
+When afs_write_end() is called with copied == 0, it tries to set the
+dirty region, but there's no way to actually encode a 0-length region in
+the encoding in page->private.
+
+"0,0", for example, indicates a 1-byte region at offset 0. The maths
+miscalculates this and sets it incorrectly.
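+
+As a rough illustration (the field layout and helper name below are an
+assumption, not the actual afs code), the dirty range is stored as an
+inclusive (first, last) byte pair, so the smallest representable region
+is one byte and copied == 0 has no valid encoding:
+
+```
+/* Hypothetical sketch of the inclusive-pair encoding. */
+static unsigned long dirty_encode(unsigned int first, unsigned int last)
+{
+	/* "0,0" (first == last == 0) already means "byte 0 is dirty",
+	 * so an empty range cannot be expressed at all.
+	 */
+	return ((unsigned long)last << 16) | first;
+}
+```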
+
+Fix it to just do nothing but unlock and put the page in this case. We
+don't actually need to mark the page dirty as nothing presumably
+changed.
+
+Fixes: 65dd2d6072d3 ("afs: Alter dirty range encoding in page->private")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/write.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index 50371207f3273..c9195fc67fd8f 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -169,11 +169,14 @@ int afs_write_end(struct file *file, struct address_space *mapping,
+ unsigned int f, from = pos & (PAGE_SIZE - 1);
+ unsigned int t, to = from + copied;
+ loff_t i_size, maybe_i_size;
+- int ret;
++ int ret = 0;
+
+ _enter("{%llx:%llu},{%lx}",
+ vnode->fid.vid, vnode->fid.vnode, page->index);
+
++ if (copied == 0)
++ goto out;
++
+ maybe_i_size = pos + copied;
+
+ i_size = i_size_read(&vnode->vfs_inode);
+--
+2.27.0
+
--- /dev/null
+From 0d3cd3a520be6008f9ed6865990b33511a13131a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Oct 2020 01:43:59 +0100
+Subject: ARM: 9019/1: kprobes: Avoid fortify_panic() when copying optprobe
+ template
+
+From: Andrew Jeffery <andrew@aj.id.au>
+
+[ Upstream commit 9fa2e7af3d53a4b769136eccc32c02e128a4ee51 ]
+
+Setting both CONFIG_KPROBES=y and CONFIG_FORTIFY_SOURCE=y on ARM leads
+to a panic in memcpy() when injecting a kprobe despite the fixes found
+in commit e46daee53bb5 ("ARM: 8806/1: kprobes: Fix false positive with
+FORTIFY_SOURCE") and commit 0ac569bf6a79 ("ARM: 8834/1: Fix: kprobes:
+optimized kprobes illegal instruction").
+
+arch/arm/include/asm/kprobes.h effectively declares the target type of
+the optprobe_template_entry assembly label as a u32, which leads
+memcpy()'s __builtin_object_size() call to determine that the
+pointed-to object is of size four. However, the symbol is used as a
+handle for the optimised probe assembly template, which is at least 96
+bytes in size. Using the symbol despite its declared type blows up the
+memcpy() in ARM's arch_prepare_optimized_kprobe() with a false-positive
+fortify_panic() when it should instead copy the optimised probe
+template into place:
+
+```
+$ sudo perf probe -a aspeed_g6_pinctrl_probe
+[ 158.457252] detected buffer overflow in memcpy
+[ 158.458069] ------------[ cut here ]------------
+[ 158.458283] kernel BUG at lib/string.c:1153!
+[ 158.458436] Internal error: Oops - BUG: 0 [#1] SMP ARM
+[ 158.458768] Modules linked in:
+[ 158.459043] CPU: 1 PID: 99 Comm: perf Not tainted 5.9.0-rc7-00038-gc53ebf8167e9 #158
+[ 158.459296] Hardware name: Generic DT based system
+[ 158.459529] PC is at fortify_panic+0x18/0x20
+[ 158.459658] LR is at __irq_work_queue_local+0x3c/0x74
+[ 158.459831] pc : [<8047451c>] lr : [<8020ecd4>] psr: 60000013
+[ 158.460032] sp : be2d1d50 ip : be2d1c58 fp : be2d1d5c
+[ 158.460174] r10: 00000006 r9 : 00000000 r8 : 00000060
+[ 158.460348] r7 : 8011e434 r6 : b9e0b800 r5 : 7f000000 r4 : b9fe4f0c
+[ 158.460557] r3 : 80c04cc8 r2 : 00000000 r1 : be7c03cc r0 : 00000022
+[ 158.460801] Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none
+[ 158.461037] Control: 10c5387d Table: b9cd806a DAC: 00000051
+[ 158.461251] Process perf (pid: 99, stack limit = 0x81c71a69)
+[ 158.461472] Stack: (0xbe2d1d50 to 0xbe2d2000)
+[ 158.461757] 1d40: be2d1d84 be2d1d60 8011e724 80474510
+[ 158.462104] 1d60: b9e0b800 b9fe4f0c 00000000 b9fe4f14 80c8ec80 be235000 be2d1d9c be2d1d88
+[ 158.462436] 1d80: 801cee44 8011e57c b9fe4f0c 00000000 be2d1dc4 be2d1da0 801d0ad0 801cedec
+[ 158.462742] 1da0: 00000000 00000000 b9fe4f00 ffffffea 00000000 be235000 be2d1de4 be2d1dc8
+[ 158.463087] 1dc0: 80204604 801d0738 00000000 00000000 b9fe4004 ffffffea be2d1e94 be2d1de8
+[ 158.463428] 1de0: 80205434 80204570 00385c00 00000000 00000000 00000000 be2d1e14 be2d1e08
+[ 158.463880] 1e00: 802ba014 b9fe4f00 b9e718c0 b9fe4f84 b9e71ec8 be2d1e24 00000000 00385c00
+[ 158.464365] 1e20: 00000000 626f7270 00000065 802b905c be2d1e94 0000002e 00000000 802b9914
+[ 158.464829] 1e40: be2d1e84 be2d1e50 802b9914 8028ff78 804629d0 b9e71ec0 0000002e b9e71ec0
+[ 158.465141] 1e60: be2d1ea8 80c04cc8 00000cc0 b9e713c4 00000002 80205834 80205834 0000002e
+[ 158.465488] 1e80: be235000 be235000 be2d1ea4 be2d1e98 80205854 80204e94 be2d1ecc be2d1ea8
+[ 158.465806] 1ea0: 801ee4a0 80205840 00000002 80c04cc8 00000000 0000002e 0000002e 00000000
+[ 158.466110] 1ec0: be2d1f0c be2d1ed0 801ee5c8 801ee428 00000000 be2d0000 006b1fd0 00000051
+[ 158.466398] 1ee0: 00000000 b9eedf00 0000002e 80204410 006b1fd0 be2d1f60 00000000 00000004
+[ 158.466763] 1f00: be2d1f24 be2d1f10 8020442c 801ee4c4 80205834 802c613c be2d1f5c be2d1f28
+[ 158.467102] 1f20: 802c60ac 8020441c be2d1fac be2d1f38 8010c764 802e9888 be2d1f5c b9eedf00
+[ 158.467447] 1f40: b9eedf00 006b1fd0 0000002e 00000000 be2d1f94 be2d1f60 802c634c 802c5fec
+[ 158.467812] 1f60: 00000000 00000000 00000000 80c04cc8 006b1fd0 00000003 76f7a610 00000004
+[ 158.468155] 1f80: 80100284 be2d0000 be2d1fa4 be2d1f98 802c63ec 802c62e8 00000000 be2d1fa8
+[ 158.468508] 1fa0: 80100080 802c63e0 006b1fd0 00000003 00000003 006b1fd0 0000002e 00000000
+[ 158.468858] 1fc0: 006b1fd0 00000003 76f7a610 00000004 006b1fb0 0026d348 00000017 7ef2738c
+[ 158.469202] 1fe0: 76f3431c 7ef272d8 0014ec50 76f34338 60000010 00000003 00000000 00000000
+[ 158.469461] Backtrace:
+[ 158.469683] [<80474504>] (fortify_panic) from [<8011e724>] (arch_prepare_optimized_kprobe+0x1b4/0x1f8)
+[ 158.470021] [<8011e570>] (arch_prepare_optimized_kprobe) from [<801cee44>] (alloc_aggr_kprobe+0x64/0x70)
+[ 158.470287] r9:be235000 r8:80c8ec80 r7:b9fe4f14 r6:00000000 r5:b9fe4f0c r4:b9e0b800
+[ 158.470478] [<801cede0>] (alloc_aggr_kprobe) from [<801d0ad0>] (register_kprobe+0x3a4/0x5a0)
+[ 158.470685] r5:00000000 r4:b9fe4f0c
+[ 158.470790] [<801d072c>] (register_kprobe) from [<80204604>] (__register_trace_kprobe+0xa0/0xa4)
+[ 158.471001] r9:be235000 r8:00000000 r7:ffffffea r6:b9fe4f00 r5:00000000 r4:00000000
+[ 158.471188] [<80204564>] (__register_trace_kprobe) from [<80205434>] (trace_kprobe_create+0x5ac/0x9ac)
+[ 158.471408] r7:ffffffea r6:b9fe4004 r5:00000000 r4:00000000
+[ 158.471553] [<80204e88>] (trace_kprobe_create) from [<80205854>] (create_or_delete_trace_kprobe+0x20/0x3c)
+[ 158.471766] r10:be235000 r9:be235000 r8:0000002e r7:80205834 r6:80205834 r5:00000002
+[ 158.471949] r4:b9e713c4
+[ 158.472027] [<80205834>] (create_or_delete_trace_kprobe) from [<801ee4a0>] (trace_run_command+0x84/0x9c)
+[ 158.472255] [<801ee41c>] (trace_run_command) from [<801ee5c8>] (trace_parse_run_command+0x110/0x1f8)
+[ 158.472471] r6:00000000 r5:0000002e r4:0000002e
+[ 158.472594] [<801ee4b8>] (trace_parse_run_command) from [<8020442c>] (probes_write+0x1c/0x28)
+[ 158.472800] r10:00000004 r9:00000000 r8:be2d1f60 r7:006b1fd0 r6:80204410 r5:0000002e
+[ 158.472968] r4:b9eedf00
+[ 158.473046] [<80204410>] (probes_write) from [<802c60ac>] (vfs_write+0xcc/0x1e8)
+[ 158.473226] [<802c5fe0>] (vfs_write) from [<802c634c>] (ksys_write+0x70/0xf8)
+[ 158.473400] r8:00000000 r7:0000002e r6:006b1fd0 r5:b9eedf00 r4:b9eedf00
+[ 158.473567] [<802c62dc>] (ksys_write) from [<802c63ec>] (sys_write+0x18/0x1c)
+[ 158.473745] r9:be2d0000 r8:80100284 r7:00000004 r6:76f7a610 r5:00000003 r4:006b1fd0
+[ 158.473932] [<802c63d4>] (sys_write) from [<80100080>] (ret_fast_syscall+0x0/0x54)
+[ 158.474126] Exception stack(0xbe2d1fa8 to 0xbe2d1ff0)
+[ 158.474305] 1fa0: 006b1fd0 00000003 00000003 006b1fd0 0000002e 00000000
+[ 158.474573] 1fc0: 006b1fd0 00000003 76f7a610 00000004 006b1fb0 0026d348 00000017 7ef2738c
+[ 158.474811] 1fe0: 76f3431c 7ef272d8 0014ec50 76f34338
+[ 158.475171] Code: e24cb004 e1a01000 e59f0004 ebf40dd3 (e7f001f2)
+[ 158.475847] ---[ end trace 55a5b31c08a29f00 ]---
+[ 158.476088] Kernel panic - not syncing: Fatal exception
+[ 158.476375] CPU0: stopping
+[ 158.476709] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G D 5.9.0-rc7-00038-gc53ebf8167e9 #158
+[ 158.477176] Hardware name: Generic DT based system
+[ 158.477411] Backtrace:
+[ 158.477604] [<8010dd28>] (dump_backtrace) from [<8010dfd4>] (show_stack+0x20/0x24)
+[ 158.477990] r7:00000000 r6:60000193 r5:00000000 r4:80c2f634
+[ 158.478323] [<8010dfb4>] (show_stack) from [<8046390c>] (dump_stack+0xcc/0xe8)
+[ 158.478686] [<80463840>] (dump_stack) from [<80110750>] (handle_IPI+0x334/0x3a0)
+[ 158.479063] r7:00000000 r6:00000004 r5:80b65cc8 r4:80c78278
+[ 158.479352] [<8011041c>] (handle_IPI) from [<801013f8>] (gic_handle_irq+0x88/0x94)
+[ 158.479757] r10:10c5387d r9:80c01ed8 r8:00000000 r7:c0802000 r6:80c0537c r5:000003ff
+[ 158.480146] r4:c080200c r3:fffffff4
+[ 158.480364] [<80101370>] (gic_handle_irq) from [<80100b6c>] (__irq_svc+0x6c/0x90)
+[ 158.480748] Exception stack(0x80c01ed8 to 0x80c01f20)
+[ 158.481031] 1ec0: 000128bc 00000000
+[ 158.481499] 1ee0: be7b8174 8011d3a0 80c00000 00000000 80c04cec 80c04d28 80c5d7c2 80a026d4
+[ 158.482091] 1f00: 10c5387d 80c01f34 80c01f38 80c01f28 80109554 80109558 60000013 ffffffff
+[ 158.482621] r9:80c00000 r8:80c5d7c2 r7:80c01f0c r6:ffffffff r5:60000013 r4:80109558
+[ 158.482983] [<80109518>] (arch_cpu_idle) from [<80818780>] (default_idle_call+0x38/0x120)
+[ 158.483360] [<80818748>] (default_idle_call) from [<801585a8>] (do_idle+0xd4/0x158)
+[ 158.483945] r5:00000000 r4:80c00000
+[ 158.484237] [<801584d4>] (do_idle) from [<801588f4>] (cpu_startup_entry+0x28/0x2c)
+[ 158.484784] r9:80c78000 r8:00000000 r7:80c78000 r6:80c78040 r5:80c04cc0 r4:000000d6
+[ 158.485328] [<801588cc>] (cpu_startup_entry) from [<80810a78>] (rest_init+0x9c/0xbc)
+[ 158.485930] [<808109dc>] (rest_init) from [<80b00ae4>] (arch_call_rest_init+0x18/0x1c)
+[ 158.486503] r5:80c04cc0 r4:00000001
+[ 158.486857] [<80b00acc>] (arch_call_rest_init) from [<80b00fcc>] (start_kernel+0x46c/0x548)
+[ 158.487589] [<80b00b60>] (start_kernel) from [<00000000>] (0x0)
+```
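+
+A minimal sketch of the underlying behaviour (illustrative only, not
+kernel code): FORTIFY_SOURCE sizes the source of the memcpy() with
+__builtin_object_size(), which returns 4 for a pointer to a lone u32
+but "unknown" for an incomplete array type, which is why the
+declarations below are switched to array form:
+
+```
+extern unsigned int tmpl_as_scalar;    /* like the old declaration   */
+extern unsigned int tmpl_as_array[];   /* like the fixed declaration */
+
+unsigned long scalar_size(void)
+{
+	return __builtin_object_size(&tmpl_as_scalar, 0);  /* 4 */
+}
+
+unsigned long array_size(void)
+{
+	return __builtin_object_size(tmpl_as_array, 0);    /* (size_t)-1: unknown */
+}
+```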
+
+Fixes: e46daee53bb5 ("ARM: 8806/1: kprobes: Fix false positive with FORTIFY_SOURCE")
+Fixes: 0ac569bf6a79 ("ARM: 8834/1: Fix: kprobes: optimized kprobes illegal instruction")
+Suggested-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Andrew Jeffery <andrew@aj.id.au>
+Tested-by: Luka Oreskovic <luka.oreskovic@sartura.hr>
+Tested-by: Joel Stanley <joel@jms.id.au>
+Reviewed-by: Joel Stanley <joel@jms.id.au>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Luka Oreskovic <luka.oreskovic@sartura.hr>
+Cc: Juraj Vijtiuk <juraj.vijtiuk@sartura.hr>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/include/asm/kprobes.h | 22 +++++++++++-----------
+ arch/arm/probes/kprobes/opt-arm.c | 18 +++++++++---------
+ 2 files changed, 20 insertions(+), 20 deletions(-)
+
+diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
+index 213607a1f45c1..e26a278d301ab 100644
+--- a/arch/arm/include/asm/kprobes.h
++++ b/arch/arm/include/asm/kprobes.h
+@@ -44,20 +44,20 @@ int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+ /* optinsn template addresses */
+-extern __visible kprobe_opcode_t optprobe_template_entry;
+-extern __visible kprobe_opcode_t optprobe_template_val;
+-extern __visible kprobe_opcode_t optprobe_template_call;
+-extern __visible kprobe_opcode_t optprobe_template_end;
+-extern __visible kprobe_opcode_t optprobe_template_sub_sp;
+-extern __visible kprobe_opcode_t optprobe_template_add_sp;
+-extern __visible kprobe_opcode_t optprobe_template_restore_begin;
+-extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
+-extern __visible kprobe_opcode_t optprobe_template_restore_end;
++extern __visible kprobe_opcode_t optprobe_template_entry[];
++extern __visible kprobe_opcode_t optprobe_template_val[];
++extern __visible kprobe_opcode_t optprobe_template_call[];
++extern __visible kprobe_opcode_t optprobe_template_end[];
++extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
++extern __visible kprobe_opcode_t optprobe_template_add_sp[];
++extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
++extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
++extern __visible kprobe_opcode_t optprobe_template_restore_end[];
+
+ #define MAX_OPTIMIZED_LENGTH 4
+ #define MAX_OPTINSN_SIZE \
+- ((unsigned long)&optprobe_template_end - \
+- (unsigned long)&optprobe_template_entry)
++ ((unsigned long)optprobe_template_end - \
++ (unsigned long)optprobe_template_entry)
+ #define RELATIVEJUMP_SIZE 4
+
+ struct arch_optimized_insn {
+diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
+index 7a449df0b3591..c78180172120f 100644
+--- a/arch/arm/probes/kprobes/opt-arm.c
++++ b/arch/arm/probes/kprobes/opt-arm.c
+@@ -85,21 +85,21 @@ asm (
+ "optprobe_template_end:\n");
+
+ #define TMPL_VAL_IDX \
+- ((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry)
++ ((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
+ #define TMPL_CALL_IDX \
+- ((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry)
++ ((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
+ #define TMPL_END_IDX \
+- ((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry)
++ ((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
+ #define TMPL_ADD_SP \
+- ((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
++ ((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
+ #define TMPL_SUB_SP \
+- ((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
++ ((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
+ #define TMPL_RESTORE_BEGIN \
+- ((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry)
++ ((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
+ #define TMPL_RESTORE_ORIGN_INSN \
+- ((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry)
++ ((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
+ #define TMPL_RESTORE_END \
+- ((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry)
++ ((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
+
+ /*
+ * ARM can always optimize an instruction when using ARM ISA, except
+@@ -234,7 +234,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
+ }
+
+ /* Copy arch-dep-instance from template. */
+- memcpy(code, (unsigned long *)&optprobe_template_entry,
++ memcpy(code, (unsigned long *)optprobe_template_entry,
+ TMPL_END_IDX * sizeof(kprobe_opcode_t));
+
+ /* Adjust buffer according to instruction. */
+--
+2.27.0
+
--- /dev/null
+From 27d115096e6f502eecaae44ef2a1583b0cb34f5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Nov 2020 13:00:14 +0530
+Subject: arm64/mm: Validate hotplug range before creating linear mapping
+
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+
+[ Upstream commit 58284a901b426e6130672e9f14c30dfd5a9dbde0 ]
+
+During the memory hotplug process, the linear mapping should not be created
+for a given memory range if it would fall outside the maximum allowed linear
+range. Otherwise it might cause memory corruption in the kernel virtual space.
+
+The maximum linear mapping region is [PAGE_OFFSET..(PAGE_END - 1)],
+accommodating both of its ends but excluding PAGE_END. The maximum physical
+range that can be mapped inside this linear mapping range must also be
+derived from its end points.
+
+This ensures that arch_add_memory() validates the memory hot add range for
+its potential linear mapping requirements before creating it with
+__create_pgd_mapping().
+
+Fixes: 4ab215061554 ("arm64: Add memory hotplug support")
+Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: Steven Price <steven.price@arm.com>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-kernel@vger.kernel.org
+Link: https://lore.kernel.org/r/1605252614-761-1-git-send-email-anshuman.khandual@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/mm/mmu.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 75df62fea1b68..a834e7fb0e250 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -1433,11 +1433,28 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
+ free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
+ }
+
++static bool inside_linear_region(u64 start, u64 size)
++{
++ /*
++ * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
++ * accommodating both its ends but excluding PAGE_END. Max physical
++ * range which can be mapped inside this linear mapping range, must
++ * also be derived from its end points.
++ */
++ return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
++ (start + size - 1) <= __pa(PAGE_END - 1);
++}
++
+ int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_params *params)
+ {
+ int ret, flags = 0;
+
++ if (!inside_linear_region(start, size)) {
++ pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
++ return -EINVAL;
++ }
++
+ if (rodata_full || debug_pagealloc_enabled())
+ flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+
+--
+2.27.0
+
--- /dev/null
+From 05b60ed168d933be7d4cd43355ec4ac2835cc880 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Oct 2020 18:15:05 +0100
+Subject: bpf: Don't rely on GCC __attribute__((optimize)) to disable GCSE
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Upstream commit 080b6f40763565f65ebb9540219c71ce885cf568 ]
+
+Commit 3193c0836 ("bpf: Disable GCC -fgcse optimization for
+___bpf_prog_run()") introduced a __no_fgcse macro that expands to a
+function scope __attribute__((optimize("-fno-gcse"))), to disable a
+GCC specific optimization that was causing trouble on x86 builds, and
+was not expected to have any positive effect in the first place.
+
+However, as the GCC manual documents, __attribute__((optimize))
+is not for production use, and results in all other optimization
+options being forgotten for the function in question. This can
+cause all kinds of trouble, but in one particular reported case,
+it causes -fno-asynchronous-unwind-tables to be disregarded,
+resulting in .eh_frame info being emitted for the function.
+
+This reverts commit 3193c0836, and instead, it disables the -fgcse
+optimization for the entire source file, but only when building for
+X86 using GCC with CONFIG_BPF_JIT_ALWAYS_ON disabled. Note that the
+original commit states that CONFIG_RETPOLINE=n triggers the issue,
+whereas CONFIG_RETPOLINE=y performs better without the optimization,
+so it is kept disabled in both cases.
+
+Fixes: 3193c0836f20 ("bpf: Disable GCC -fgcse optimization for ___bpf_prog_run()")
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Link: https://lore.kernel.org/lkml/CAMuHMdUg0WJHEcq6to0-eODpXPOywLot6UD2=GFHpzoj_hCoBQ@mail.gmail.com/
+Link: https://lore.kernel.org/bpf/20201028171506.15682-2-ardb@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/compiler-gcc.h | 2 --
+ include/linux/compiler_types.h | 4 ----
+ kernel/bpf/Makefile | 6 +++++-
+ kernel/bpf/core.c | 2 +-
+ 4 files changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 7a3769040d7dc..3017ebd400546 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -175,5 +175,3 @@
+ #else
+ #define __diag_GCC_8(s)
+ #endif
+-
+-#define __no_fgcse __attribute__((optimize("-fno-gcse")))
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index 6e390d58a9f8c..ac3fa37a84f94 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -247,10 +247,6 @@ struct ftrace_likely_data {
+ #define asm_inline asm
+ #endif
+
+-#ifndef __no_fgcse
+-# define __no_fgcse
+-#endif
+-
+ /* Are two types/vars the same type (ignoring qualifiers)? */
+ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+
+diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
+index e6eb9c0402dab..0cc0de72163dc 100644
+--- a/kernel/bpf/Makefile
++++ b/kernel/bpf/Makefile
+@@ -1,6 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-y := core.o
+-CFLAGS_core.o += $(call cc-disable-warning, override-init)
++ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y)
++# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details
++cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
++endif
++CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
+
+ obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o
+ obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index ed0b3578867c0..3cb26e82549ac 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1364,7 +1364,7 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ *
+ * Decode and execute eBPF instructions.
+ */
+-static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
++static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+ {
+ #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
+ #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
+--
+2.27.0
+
--- /dev/null
+From 21ef4b3f365820533fb8683401ce1a52d5e78c4b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Nov 2020 12:23:32 +0100
+Subject: bpf: Zero-fill re-used per-cpu map element
+
+From: David Verbeiren <david.verbeiren@tessares.net>
+
+[ Upstream commit d3bec0138bfbe58606fc1d6f57a4cdc1a20218db ]
+
+Zero-fill element values for all cpus other than the current one, just as
+when not using prealloc. This is the only way the bpf program can
+ensure known initial values for all cpus ('onallcpus' cannot be
+set when coming from the bpf program).
+
+The scenario is: bpf program inserts some elements in a per-cpu
+map, then deletes some (or userspace does). When later adding
+new elements using bpf_map_update_elem(), the bpf program can
+only set the value of the new elements for the current cpu.
+When prealloc is enabled, previously deleted elements are re-used.
+Without the fix, values for other cpus remain whatever they were
+when the re-used entry was previously freed.
+
+A selftest is added to validate correct operation in the above
+scenario as well as in the case of LRU per-cpu map element re-use.
+
+Fixes: 6c9059817432 ("bpf: pre-allocate hash map elements")
+Signed-off-by: David Verbeiren <david.verbeiren@tessares.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20201104112332.15191-1-david.verbeiren@tessares.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/hashtab.c | 30 ++-
+ .../selftests/bpf/prog_tests/map_init.c | 214 ++++++++++++++++++
+ .../selftests/bpf/progs/test_map_init.c | 33 +++
+ 3 files changed, 275 insertions(+), 2 deletions(-)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/map_init.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_map_init.c
+
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 7df28a45c66bf..15364543b2c0f 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -821,6 +821,32 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
+ }
+ }
+
++static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
++ void *value, bool onallcpus)
++{
++ /* When using prealloc and not setting the initial value on all cpus,
++ * zero-fill element values for other cpus (just as what happens when
++ * not using prealloc). Otherwise, bpf program has no way to ensure
++ * known initial values for cpus other than current one
++ * (onallcpus=false always when coming from bpf prog).
++ */
++ if (htab_is_prealloc(htab) && !onallcpus) {
++ u32 size = round_up(htab->map.value_size, 8);
++ int current_cpu = raw_smp_processor_id();
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ if (cpu == current_cpu)
++ bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
++ size);
++ else
++ memset(per_cpu_ptr(pptr, cpu), 0, size);
++ }
++ } else {
++ pcpu_copy_value(htab, pptr, value, onallcpus);
++ }
++}
++
+ static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
+ {
+ return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
+@@ -891,7 +917,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+ }
+ }
+
+- pcpu_copy_value(htab, pptr, value, onallcpus);
++ pcpu_init_value(htab, pptr, value, onallcpus);
+
+ if (!prealloc)
+ htab_elem_set_ptr(l_new, key_size, pptr);
+@@ -1183,7 +1209,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
+ pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
+ value, onallcpus);
+ } else {
+- pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
++ pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
+ value, onallcpus);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
+ l_new = NULL;
+diff --git a/tools/testing/selftests/bpf/prog_tests/map_init.c b/tools/testing/selftests/bpf/prog_tests/map_init.c
+new file mode 100644
+index 0000000000000..14a31109dd0e0
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/map_init.c
+@@ -0,0 +1,214 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
++
++#include <test_progs.h>
++#include "test_map_init.skel.h"
++
++#define TEST_VALUE 0x1234
++#define FILL_VALUE 0xdeadbeef
++
++static int nr_cpus;
++static int duration;
++
++typedef unsigned long long map_key_t;
++typedef unsigned long long map_value_t;
++typedef struct {
++ map_value_t v; /* padding */
++} __bpf_percpu_val_align pcpu_map_value_t;
++
++
++static int map_populate(int map_fd, int num)
++{
++ pcpu_map_value_t value[nr_cpus];
++ int i, err;
++ map_key_t key;
++
++ for (i = 0; i < nr_cpus; i++)
++ bpf_percpu(value, i) = FILL_VALUE;
++
++ for (key = 1; key <= num; key++) {
++ err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
++ if (!ASSERT_OK(err, "bpf_map_update_elem"))
++ return -1;
++ }
++
++ return 0;
++}
++
++static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz,
++ int *map_fd, int populate)
++{
++ struct test_map_init *skel;
++ int err;
++
++ skel = test_map_init__open();
++ if (!ASSERT_OK_PTR(skel, "skel_open"))
++ return NULL;
++
++ err = bpf_map__set_type(skel->maps.hashmap1, map_type);
++ if (!ASSERT_OK(err, "bpf_map__set_type"))
++ goto error;
++
++ err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz);
++ if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
++ goto error;
++
++ err = test_map_init__load(skel);
++ if (!ASSERT_OK(err, "skel_load"))
++ goto error;
++
++ *map_fd = bpf_map__fd(skel->maps.hashmap1);
++ if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))
++ goto error;
++
++ err = map_populate(*map_fd, populate);
++ if (!ASSERT_OK(err, "map_populate"))
++ goto error_map;
++
++ return skel;
++
++error_map:
++ close(*map_fd);
++error:
++ test_map_init__destroy(skel);
++ return NULL;
++}
++
++/* executes bpf program that updates map with key, value */
++static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key,
++ map_value_t value)
++{
++ struct test_map_init__bss *bss;
++
++ bss = skel->bss;
++
++ bss->inKey = key;
++ bss->inValue = value;
++ bss->inPid = getpid();
++
++ if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach"))
++ return -1;
++
++ /* Let tracepoint trigger */
++ syscall(__NR_getpgid);
++
++ test_map_init__detach(skel);
++
++ return 0;
++}
++
++static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
++{
++ int i, nzCnt = 0;
++ map_value_t val;
++
++ for (i = 0; i < nr_cpus; i++) {
++ val = bpf_percpu(value, i);
++ if (val) {
++ if (CHECK(val != expected, "map value",
++ "unexpected for cpu %d: 0x%llx\n", i, val))
++ return -1;
++ nzCnt++;
++ }
++ }
++
++ if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n",
++ nzCnt))
++ return -1;
++
++ return 0;
++}
++
++/* Add key=1 elem with values set for all CPUs
++ * Delete elem key=1
++ * Run bpf prog that inserts new key=1 elem with value=0x1234
++ * (bpf prog can only set value for current CPU)
++ * Lookup Key=1 and check value is as expected for all CPUs:
++ * value set by bpf prog for one CPU, 0 for all others
++ */
++static void test_pcpu_map_init(void)
++{
++ pcpu_map_value_t value[nr_cpus];
++ struct test_map_init *skel;
++ int map_fd, err;
++ map_key_t key;
++
++ /* max 1 elem in map so insertion is forced to reuse freed entry */
++ skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);
++ if (!ASSERT_OK_PTR(skel, "prog_setup"))
++ return;
++
++ /* delete element so the entry can be re-used*/
++ key = 1;
++ err = bpf_map_delete_elem(map_fd, &key);
++ if (!ASSERT_OK(err, "bpf_map_delete_elem"))
++ goto cleanup;
++
++ /* run bpf prog that inserts new elem, re-using the slot just freed */
++ err = prog_run_insert_elem(skel, key, TEST_VALUE);
++ if (!ASSERT_OK(err, "prog_run_insert_elem"))
++ goto cleanup;
++
++ /* check that key=1 was re-created by bpf prog */
++ err = bpf_map_lookup_elem(map_fd, &key, value);
++ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
++ goto cleanup;
++
++ /* and has expected values */
++ check_values_one_cpu(value, TEST_VALUE);
++
++cleanup:
++ test_map_init__destroy(skel);
++}
++
++/* Add key=1 and key=2 elems with values set for all CPUs
++ * Run bpf prog that inserts new key=3 elem
++ * (only for current cpu; other cpus should have initial value = 0)
++ * Lookup Key=1 and check value is as expected for all CPUs
++ */
++static void test_pcpu_lru_map_init(void)
++{
++ pcpu_map_value_t value[nr_cpus];
++ struct test_map_init *skel;
++ int map_fd, err;
++ map_key_t key;
++
++ /* Set up LRU map with 2 elements, values filled for all CPUs.
++ * With these 2 elements, the LRU map is full
++ */
++ skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2);
++ if (!ASSERT_OK_PTR(skel, "prog_setup"))
++ return;
++
++ /* run bpf prog that inserts new key=3 element, re-using LRU slot */
++ key = 3;
++ err = prog_run_insert_elem(skel, key, TEST_VALUE);
++ if (!ASSERT_OK(err, "prog_run_insert_elem"))
++ goto cleanup;
++
++ /* check that key=3 replaced one of earlier elements */
++ err = bpf_map_lookup_elem(map_fd, &key, value);
++ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
++ goto cleanup;
++
++ /* and has expected values */
++ check_values_one_cpu(value, TEST_VALUE);
++
++cleanup:
++ test_map_init__destroy(skel);
++}
++
++void test_map_init(void)
++{
++ nr_cpus = bpf_num_possible_cpus();
++ if (nr_cpus <= 1) {
++ printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
++ test__skip();
++ return;
++ }
++
++ if (test__start_subtest("pcpu_map_init"))
++ test_pcpu_map_init();
++ if (test__start_subtest("pcpu_lru_map_init"))
++ test_pcpu_lru_map_init();
++}
+diff --git a/tools/testing/selftests/bpf/progs/test_map_init.c b/tools/testing/selftests/bpf/progs/test_map_init.c
+new file mode 100644
+index 0000000000000..c89d28ead6737
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_map_init.c
+@@ -0,0 +1,33 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
++
++#include "vmlinux.h"
++#include <bpf/bpf_helpers.h>
++
++__u64 inKey = 0;
++__u64 inValue = 0;
++__u32 inPid = 0;
++
++struct {
++ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
++ __uint(max_entries, 2);
++ __type(key, __u64);
++ __type(value, __u64);
++} hashmap1 SEC(".maps");
++
++
++SEC("tp/syscalls/sys_enter_getpgid")
++int sysenter_getpgid(const void *ctx)
++{
++ /* Just do it for once, when called from our own test prog. This
++ * ensures the map value is only updated for a single CPU.
++ */
++ int cur_pid = bpf_get_current_pid_tgid() >> 32;
++
++ if (cur_pid == inPid)
++ bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST);
++
++ return 0;
++}
++
++char _license[] SEC("license") = "GPL";
+--
+2.27.0
+
--- /dev/null
+From 2a5c7d140e2dab74b26a9cc86d4d8e65d1fff386 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Nov 2020 16:21:41 +0530
+Subject: ch_ktls: tcb update fails sometimes
+
+From: Rohit Maheshwari <rohitm@chelsio.com>
+
+[ Upstream commit 7d01c428c86b525dc780226924d74df2048cf411 ]
+
+The context id and port id should be filled in when sending a tcb update.
+
+Fixes: 5a4b9fe7fece ("cxgb4/chcr: complete record tx handling")
+Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/chelsio/chcr_ktls.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
+index 026689d091102..dc5e22bc64b39 100644
+--- a/drivers/crypto/chelsio/chcr_ktls.c
++++ b/drivers/crypto/chelsio/chcr_ktls.c
+@@ -659,7 +659,8 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
+ }
+
+ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+- u32 tid, void *pos, u16 word, u64 mask,
++ u32 tid, void *pos, u16 word,
++ struct sge_eth_txq *q, u64 mask,
+ u64 val, u32 reply)
+ {
+ struct cpl_set_tcb_field_core *cpl;
+@@ -668,7 +669,10 @@ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+
+ /* ULP_TXPKT */
+ txpkt = pos;
+- txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
++ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
++ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
++ ULP_TXPKT_FID_V(q->q.cntxt_id) |
++ ULP_TXPKT_RO_F);
+ txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
+
+ /* ULPTX_IDATA sub-command */
+@@ -723,7 +727,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+ } else {
+ u8 buf[48] = {0};
+
+- __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
++ __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
+ mask, val, reply);
+
+ return chcr_copy_to_txd(buf, &q->q, pos,
+@@ -731,7 +735,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+ }
+ }
+
+- pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
++ pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
+ mask, val, reply);
+
+ /* check again if we are at the end of the queue */
+--
+2.27.0
+
--- /dev/null
+From b5bab96eb8184fde2a8397dec9d74cb51cfe47a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Nov 2020 16:21:33 +0530
+Subject: ch_ktls: Update checksum information
+
+From: Rohit Maheshwari <rohitm@chelsio.com>
+
+[ Upstream commit 86716b51d14fc2201938939b323ba3ad99186910 ]
+
+Checksum update was missing in the WR.
+
+Fixes: 429765a149f1 ("chcr: handle partial end part of a record")
+Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/chelsio/chcr_ktls.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
+index c5cce024886ac..026689d091102 100644
+--- a/drivers/crypto/chelsio/chcr_ktls.c
++++ b/drivers/crypto/chelsio/chcr_ktls.c
+@@ -926,6 +926,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+ struct iphdr *ip;
+ int credits;
+ u8 buf[150];
++ u64 cntrl1;
+ void *pos;
+
+ iplen = skb_network_header_len(skb);
+@@ -964,22 +965,28 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+ TXPKT_PF_V(tx_info->adap->pf));
+ cpl->pack = 0;
+ cpl->len = htons(pktlen);
+- /* checksum offload */
+- cpl->ctrl1 = 0;
+-
+- pos = cpl + 1;
+
+ memcpy(buf, skb->data, pktlen);
+ if (tx_info->ip_family == AF_INET) {
+ /* we need to correct ip header len */
+ ip = (struct iphdr *)(buf + maclen);
+ ip->tot_len = htons(pktlen - maclen);
++ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
+ #if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ ip6 = (struct ipv6hdr *)(buf + maclen);
+ ip6->payload_len = htons(pktlen - maclen - iplen);
++ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
+ #endif
+ }
++
++ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
++ TXPKT_IPHDR_LEN_V(iplen);
++ /* checksum offload */
++ cpl->ctrl1 = cpu_to_be64(cntrl1);
++
++ pos = cpl + 1;
++
+ /* now take care of the tcp header, if fin is not set then clear push
+ * bit as well, and if fin is set, it will be sent at the last so we
+ * need to update the tcp sequence number as per the last packet.
+--
+2.27.0
+
--- /dev/null
+From f3f08c5fe957ce3c92b1a207487b942ff64ee46e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Nov 2020 22:46:14 +0800
+Subject: cosa: Add missing kfree in error path of cosa_write
+
+From: Wang Hai <wanghai38@huawei.com>
+
+[ Upstream commit 52755b66ddcef2e897778fac5656df18817b59ab ]
+
+If memory allocation for 'kbuf' succeeds, cosa_write() doesn't have a
+corresponding kfree() in its error handling. Add the missing kfree() to
+this error path.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Acked-by: Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+Link: https://lore.kernel.org/r/20201110144614.43194-1-wanghai38@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wan/cosa.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
+index f8aed0696d775..2369ca250cd65 100644
+--- a/drivers/net/wan/cosa.c
++++ b/drivers/net/wan/cosa.c
+@@ -889,6 +889,7 @@ static ssize_t cosa_write(struct file *file,
+ chan->tx_status = 1;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ up(&chan->wsem);
++ kfree(kbuf);
+ return -ERESTARTSYS;
+ }
+ }
+--
+2.27.0
+
--- /dev/null
+From 5e4389fb69d530d3eba17237c5d31b6dba6edbed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Oct 2020 12:50:26 +0800
+Subject: gpio: aspeed: fix ast2600 bank properties
+
+From: Billy Tsai <billy_tsai@aspeedtech.com>
+
+[ Upstream commit 560b6ac37a87fcb78d580437e3e0bc2b6b5b0295 ]
+
+GPIO_T is mapped to the most significant byte of the input/output masks, and
+its byte in the "output" mask should be 0 because GPIO_T is input only. All
+the other bits need to be 1 because GPIO_Q/R/S support both input and
+output modes.
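+
+A rough byte-by-byte view of the bank-4 masks added below (assuming one
+byte per GPIO group, with T in the most significant byte):
+
+```
+/* Illustrative breakdown only; the names here are hypothetical. */
+#define BANK4_INPUT_MASK	0xffffffffU	/* Q, R, S and T all readable */
+#define BANK4_OUTPUT_MASK	0x00ffffffU	/* Q, R, S writable; T (input-only) excluded */
+#define BANK4_GPIO_T_BYTE(m)	(((m) >> 24) & 0xff)	/* 0xff in input mask, 0x00 in output mask */
+```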
+
+Fixes: ab4a85534c3e ("gpio: aspeed: Add in ast2600 details to Aspeed driver")
+Signed-off-by: Billy Tsai <billy_tsai@aspeedtech.com>
+Reviewed-by: Tao Ren <rentao.bupt@gmail.com>
+Reviewed-by: Joel Stanley <joel@jms.id.au>
+Reviewed-by: Andrew Jeffery <andrew@aj.id.au>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpio-aspeed.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index e44d5de2a1201..b966f5e28ebff 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -1114,6 +1114,7 @@ static const struct aspeed_gpio_config ast2500_config =
+
+ static const struct aspeed_bank_props ast2600_bank_props[] = {
+ /* input output */
++ {4, 0xffffffff, 0x00ffffff}, /* Q/R/S/T */
+ {5, 0xffffffff, 0xffffff00}, /* U/V/W/X */
+ {6, 0x0000ffff, 0x0000ffff}, /* Y/Z */
+ { },
+--
+2.27.0
+
--- /dev/null
+From d693fcb5ed870c209e72f9104b88536fddbe883e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Nov 2020 14:08:23 +1100
+Subject: hwmon: (applesmc) Re-work SMC comms
+
+From: Brad Campbell <brad@fnarfbargle.com>
+
+[ Upstream commit 4d64bb4ba5ecf4831448cdb2fe16d0ae91b2b40b ]
+
+Commit fff2d0f701e6 ("hwmon: (applesmc) avoid overlong udelay()")
+introduced an issue whereby communication with the SMC became
+unreliable with write errors like:
+
+[ 120.378614] applesmc: send_byte(0x00, 0x0300) fail: 0x40
+[ 120.378621] applesmc: LKSB: write data fail
+[ 120.512782] applesmc: send_byte(0x00, 0x0300) fail: 0x40
+[ 120.512787] applesmc: LKSB: write data fail
+
+The original code appeared to be timing sensitive and was not reliable
+with the timing changes in the aforementioned commit.
+
+This patch re-factors the SMC communication to remove the timing
+dependencies and restore functionality with the changes previously
+committed.
+
+Tested on: MacBookAir6,2 MacBookPro11,1 iMac12,2, MacBookAir1,1,
+MacBookAir3,1
+
+Fixes: fff2d0f701e6 ("hwmon: (applesmc) avoid overlong udelay()")
+Reported-by: Andreas Kemnade <andreas@kemnade.info>
+Tested-by: Andreas Kemnade <andreas@kemnade.info> # MacBookAir6,2
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Brad Campbell <brad@fnarfbargle.com>
+Signed-off-by: Henrik Rydberg <rydberg@bitmath.org>
+Link: https://lore.kernel.org/r/194a7d71-a781-765a-d177-c962ef296b90@fnarfbargle.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/applesmc.c | 130 ++++++++++++++++++++++++---------------
+ 1 file changed, 82 insertions(+), 48 deletions(-)
+
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index a18887990f4a2..79b498f816fe9 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -32,6 +32,7 @@
+ #include <linux/hwmon.h>
+ #include <linux/workqueue.h>
+ #include <linux/err.h>
++#include <linux/bits.h>
+
+ /* data port used by Apple SMC */
+ #define APPLESMC_DATA_PORT 0x300
+@@ -42,10 +43,13 @@
+
+ #define APPLESMC_MAX_DATA_LENGTH 32
+
+-/* wait up to 128 ms for a status change. */
+-#define APPLESMC_MIN_WAIT 0x0010
+-#define APPLESMC_RETRY_WAIT 0x0100
+-#define APPLESMC_MAX_WAIT 0x20000
++/* Apple SMC status bits */
++#define SMC_STATUS_AWAITING_DATA BIT(0) /* SMC has data waiting to be read */
++#define SMC_STATUS_IB_CLOSED BIT(1) /* Will ignore any input */
++#define SMC_STATUS_BUSY BIT(2) /* Command in progress */
++
++/* Initial wait is 8us */
++#define APPLESMC_MIN_WAIT 0x0008
+
+ #define APPLESMC_READ_CMD 0x10
+ #define APPLESMC_WRITE_CMD 0x11
+@@ -151,65 +155,84 @@ static unsigned int key_at_index;
+ static struct workqueue_struct *applesmc_led_wq;
+
+ /*
+- * wait_read - Wait for a byte to appear on SMC port. Callers must
+- * hold applesmc_lock.
++ * Wait for specific status bits with a mask on the SMC.
++ * Used before all transactions.
++ * This does 10 fast loops of 8us then exponentially backs off for a
++ * minimum total wait of 262ms. Depending on usleep_range this could
++ * run out past 500ms.
+ */
+-static int wait_read(void)
++
++static int wait_status(u8 val, u8 mask)
+ {
+- unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
+ u8 status;
+ int us;
++ int i;
+
+- for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
+- usleep_range(us, us * 16);
++ us = APPLESMC_MIN_WAIT;
++ for (i = 0; i < 24 ; i++) {
+ status = inb(APPLESMC_CMD_PORT);
+- /* read: wait for smc to settle */
+- if (status & 0x01)
++ if ((status & mask) == val)
+ return 0;
+- /* timeout: give up */
+- if (time_after(jiffies, end))
+- break;
++ usleep_range(us, us * 2);
++ if (i > 9)
++ us <<= 1;
+ }
+-
+- pr_warn("wait_read() fail: 0x%02x\n", status);
+ return -EIO;
+ }
+
+-/*
+- * send_byte - Write to SMC port, retrying when necessary. Callers
+- * must hold applesmc_lock.
+- */
++/* send_byte - Write to SMC data port. Callers must hold applesmc_lock. */
++
+ static int send_byte(u8 cmd, u16 port)
+ {
+- u8 status;
+- int us;
+- unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
++ int status;
++
++ status = wait_status(0, SMC_STATUS_IB_CLOSED);
++ if (status)
++ return status;
++ /*
++ * This needs to be a separate read looking for bit 0x04
++ * after bit 0x02 falls. If consolidated with the wait above
++ * this extra read may not happen if status returns both
++ * simultaneously and this would appear to be required.
++ */
++ status = wait_status(SMC_STATUS_BUSY, SMC_STATUS_BUSY);
++ if (status)
++ return status;
+
+ outb(cmd, port);
+- for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
+- usleep_range(us, us * 16);
+- status = inb(APPLESMC_CMD_PORT);
+- /* write: wait for smc to settle */
+- if (status & 0x02)
+- continue;
+- /* ready: cmd accepted, return */
+- if (status & 0x04)
+- return 0;
+- /* timeout: give up */
+- if (time_after(jiffies, end))
+- break;
+- /* busy: long wait and resend */
+- udelay(APPLESMC_RETRY_WAIT);
+- outb(cmd, port);
+- }
+-
+- pr_warn("send_byte(0x%02x, 0x%04x) fail: 0x%02x\n", cmd, port, status);
+- return -EIO;
++ return 0;
+ }
+
++/* send_command - Write a command to the SMC. Callers must hold applesmc_lock. */
++
+ static int send_command(u8 cmd)
+ {
+- return send_byte(cmd, APPLESMC_CMD_PORT);
++ int ret;
++
++ ret = wait_status(0, SMC_STATUS_IB_CLOSED);
++ if (ret)
++ return ret;
++ outb(cmd, APPLESMC_CMD_PORT);
++ return 0;
++}
++
++/*
++ * Based on logic from the Apple driver. This is issued before any interaction
++ * If busy is stuck high, issue a read command to reset the SMC state machine.
++ * If busy is stuck high after the command then the SMC is jammed.
++ */
++
++static int smc_sane(void)
++{
++ int ret;
++
++ ret = wait_status(0, SMC_STATUS_BUSY);
++ if (!ret)
++ return ret;
++ ret = send_command(APPLESMC_READ_CMD);
++ if (ret)
++ return ret;
++ return wait_status(0, SMC_STATUS_BUSY);
+ }
+
+ static int send_argument(const char *key)
+@@ -226,6 +249,11 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ {
+ u8 status, data = 0;
+ int i;
++ int ret;
++
++ ret = smc_sane();
++ if (ret)
++ return ret;
+
+ if (send_command(cmd) || send_argument(key)) {
+ pr_warn("%.4s: read arg fail\n", key);
+@@ -239,7 +267,8 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ }
+
+ for (i = 0; i < len; i++) {
+- if (wait_read()) {
++ if (wait_status(SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY,
++ SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY)) {
+ pr_warn("%.4s: read data[%d] fail\n", key, i);
+ return -EIO;
+ }
+@@ -250,19 +279,24 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ for (i = 0; i < 16; i++) {
+ udelay(APPLESMC_MIN_WAIT);
+ status = inb(APPLESMC_CMD_PORT);
+- if (!(status & 0x01))
++ if (!(status & SMC_STATUS_AWAITING_DATA))
+ break;
+ data = inb(APPLESMC_DATA_PORT);
+ }
+ if (i)
+ pr_warn("flushed %d bytes, last value is: %d\n", i, data);
+
+- return 0;
++ return wait_status(0, SMC_STATUS_BUSY);
+ }
+
+ static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len)
+ {
+ int i;
++ int ret;
++
++ ret = smc_sane();
++ if (ret)
++ return ret;
+
+ if (send_command(cmd) || send_argument(key)) {
+ pr_warn("%s: write arg fail\n", key);
+@@ -281,7 +315,7 @@ static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len)
+ }
+ }
+
+- return 0;
++ return wait_status(0, SMC_STATUS_BUSY);
+ }
+
+ static int read_register_count(unsigned int *count)
+--
+2.27.0
+
--- /dev/null
+From 78ca0b9372caf6add4e13c02446190b40f96e8c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Oct 2020 08:54:09 +0000
+Subject: i40e: Fix MAC address setting for a VF via Host/VM
+
+From: Slawomir Laba <slawomirx.laba@intel.com>
+
+[ Upstream commit 3a7001788fed0311d6fb77ed0dabe7bed3567bc0 ]
+
+Fix MAC setting flow for the PF driver.
+
+Update the VF's unicast MAC address in the VF structure if it is
+a new setting, in i40e_vc_add_mac_addr_msg.
+
+When the unicast MAC address gets deleted, record that and
+set the new unicast MAC address that is already waiting in the filter
+list. This logic is based on the order in which messages arrive at
+the PF driver.
+
+Without this change the MAC address setting was interpreted
+incorrectly in the following use cases:
+1) Print incorrect VF MAC or zero MAC
+ip link show dev $pf
+2) Don't preserve MAC between driver reload
+rmmod iavf; modprobe iavf
+3) Update VF MAC when macvlan was set
+ip link add link $vf address $mac $vf.1 type macvlan
+4) Failed to update mac address when VF was trusted
+ip link set dev $vf address $mac
+
+This also covers other configurations that involve the above commands.
+
+Fixes: f657a6e1313b ("i40e: Fix VF driver MAC address configuration")
+Signed-off-by: Slawomir Laba <slawomirx.laba@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 26 +++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 47bfb2e95e2db..343177d71f70a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2712,6 +2712,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ }
++ if (is_valid_ether_addr(al->list[i].addr) &&
++ is_zero_ether_addr(vf->default_lan_addr.addr))
++ ether_addr_copy(vf->default_lan_addr.addr,
++ al->list[i].addr);
+ }
+ }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+@@ -2739,6 +2743,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ {
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
++ bool was_unimac_deleted = false;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ i40e_status ret = 0;
+@@ -2758,6 +2763,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto error_param;
+ }
++ if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
++ was_unimac_deleted = true;
+ }
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+@@ -2778,10 +2785,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+ vf->vf_id, ret);
+
++ if (vf->trusted && was_unimac_deleted) {
++ struct i40e_mac_filter *f;
++ struct hlist_node *h;
++ u8 *macaddr = NULL;
++ int bkt;
++
++ /* set last unicast mac address as default */
++ spin_lock_bh(&vsi->mac_filter_hash_lock);
++ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
++ if (is_valid_ether_addr(f->macaddr))
++ macaddr = f->macaddr;
++ }
++ if (macaddr)
++ ether_addr_copy(vf->default_lan_addr.addr, macaddr);
++ spin_unlock_bh(&vsi->mac_filter_hash_lock);
++ }
+ error_param:
+ /* send the response to the VF */
+- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+- ret);
++ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
+ }
+
+ /**
+--
+2.27.0
+
--- /dev/null
+From cf4be9390c17a012be3b5d984241b7c3591af5c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Sep 2020 11:35:37 -0700
+Subject: igc: Fix returning wrong statistics
+
+From: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+
+[ Upstream commit 6b7ed22ae4c96a415001f0c3116ebee15bb8491a ]
+
+'igc_update_stats()' was not updating 'netdev->stats', so the statistics
+returned, for example, by:
+
+$ ip -s link show dev enp3s0
+
+were not being updated and were always zero.
+
+Fix by returning a set of statistics that are actually being
+updated (adapter->stats64).
+
+Fixes: c9a11c23ceb6 ("igc: Add netdev")
+Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 9593aa4eea369..1358a39c34ad3 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -3890,21 +3890,23 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
+ }
+
+ /**
+- * igc_get_stats - Get System Network Statistics
++ * igc_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
++ * @stats: rtnl_link_stats64 pointer
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are updated here and also from the timer callback.
+ */
+-static struct net_device_stats *igc_get_stats(struct net_device *netdev)
++static void igc_get_stats64(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
+ {
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
++ spin_lock(&adapter->stats64_lock);
+ if (!test_bit(__IGC_RESETTING, &adapter->state))
+ igc_update_stats(adapter);
+-
+- /* only return the current stats */
+- return &netdev->stats;
++ memcpy(stats, &adapter->stats64, sizeof(*stats));
++ spin_unlock(&adapter->stats64_lock);
+ }
+
+ static netdev_features_t igc_fix_features(struct net_device *netdev,
+@@ -4833,7 +4835,7 @@ static const struct net_device_ops igc_netdev_ops = {
+ .ndo_set_rx_mode = igc_set_rx_mode,
+ .ndo_set_mac_address = igc_set_mac,
+ .ndo_change_mtu = igc_change_mtu,
+- .ndo_get_stats = igc_get_stats,
++ .ndo_get_stats64 = igc_get_stats64,
+ .ndo_fix_features = igc_fix_features,
+ .ndo_set_features = igc_set_features,
+ .ndo_features_check = igc_features_check,
+--
+2.27.0
+
--- /dev/null
+From a6c0c32c3555abda2e5a87dde5b3c059e029b2c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Nov 2020 22:52:10 -0800
+Subject: kernel/watchdog: fix watchdog_allowed_mask not used warning
+
+From: Santosh Sivaraj <santosh@fossix.org>
+
+[ Upstream commit e7e046155af04cdca5e1157f28b07e1651eb317b ]
+
+Define watchdog_allowed_mask only when SOFTLOCKUP_DETECTOR is enabled.
+
+Fixes: 7feeb9cd4f5b ("watchdog/sysctl: Clean up sysctl variable name space")
+Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lkml.kernel.org/r/20201106015025.1281561-1-santosh@fossix.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/watchdog.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 5abb5b22ad130..71109065bd8eb 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -44,8 +44,6 @@ int __read_mostly soft_watchdog_user_enabled = 1;
+ int __read_mostly watchdog_thresh = 10;
+ static int __read_mostly nmi_watchdog_available;
+
+-static struct cpumask watchdog_allowed_mask __read_mostly;
+-
+ struct cpumask watchdog_cpumask __read_mostly;
+ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
+
+@@ -162,6 +160,8 @@ static void lockup_detector_update_enable(void)
+ int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+ #endif
+
++static struct cpumask watchdog_allowed_mask __read_mostly;
++
+ /* Global variables, exported for sysctl */
+ unsigned int __read_mostly softlockup_panic =
+ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+--
+2.27.0
+
--- /dev/null
+From 92d9dae9c1c6885a5f86730f83610bce249b2993 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 Nov 2020 12:12:24 -0500
+Subject: lan743x: correctly handle chips with internal PHY
+
+From: Sven Van Asbroeck <thesven73@gmail.com>
+
+[ Upstream commit 902a66e08ceaadb9a7a1ab3a4f3af611cd1d8cba ]
+
+Commit 6f197fb63850 ("lan743x: Added fixed link and RGMII support")
+assumes that chips with an internal PHY will never have a devicetree
+entry. This is incorrect: even for these chips, a devicetree entry
+can be useful, e.g. to pass the MAC address from the bootloader to the chip:
+
+ &pcie {
+ status = "okay";
+
+ host@0 {
+ reg = <0 0 0 0 0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ lan7430: ethernet@0 {
+ /* LAN7430 with internal PHY */
+ compatible = "microchip,lan743x";
+ status = "okay";
+ reg = <0 0 0 0 0>;
+ /* filled in by bootloader */
+ local-mac-address = [00 00 00 00 00 00];
+ };
+ };
+ };
+
+If a devicetree entry is present, the driver will not attach the chip
+to its internal phy, and the chip will be non-operational.
+
+Fix by tweaking the phy connection algorithm (sketched after the list):
+- first try to connect to a phy specified in the devicetree
+ (could be 'real' phy, or just a 'fixed-link')
+- if that doesn't succeed, try to connect to an internal phy, even
+ if the chip has a devnode
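+
+A rough sketch of that fallback order (error handling trimmed, helper names
+as used in the driver, but not the exact code):
+
+    /* 1) devicetree phy or fixed-link, if the devnode describes one */
+    phydev = of_phy_connect(netdev, phynode,
+                            lan743x_phy_link_status_change, 0,
+                            adapter->phy_mode);
+
+    /* 2) otherwise fall back to the internal phy on the MDIO bus */
+    if (!phydev) {
+            phydev = phy_find_first(adapter->mdiobus);
+            if (!phydev)
+                    return -EIO;
+            adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+            ret = phy_connect_direct(netdev, phydev,
+                                     lan743x_phy_link_status_change,
+                                     adapter->phy_mode);
+    }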
+
+Tested on a LAN7430 with internal PHY. I cannot test a device using
+fixed-link, as I do not have access to one.
+
+Fixes: 6f197fb63850 ("lan743x: Added fixed link and RGMII support")
+Tested-by: Sven Van Asbroeck <thesven73@gmail.com> # lan7430
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Sven Van Asbroeck <thesven73@gmail.com>
+Link: https://lore.kernel.org/r/20201108171224.23829-1-TheSven73@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index de93cc6ebc1ac..be58a941965b1 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1027,9 +1027,9 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
+
+ netdev = adapter->netdev;
+ phynode = of_node_get(adapter->pdev->dev.of_node);
+- adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+
+ if (phynode) {
++ /* try devicetree phy, or fixed link */
+ of_get_phy_mode(phynode, &adapter->phy_mode);
+
+ if (of_phy_is_fixed_link(phynode)) {
+@@ -1045,13 +1045,15 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
+ lan743x_phy_link_status_change, 0,
+ adapter->phy_mode);
+ of_node_put(phynode);
+- if (!phydev)
+- goto return_error;
+- } else {
++ }
++
++ if (!phydev) {
++ /* try internal phy */
+ phydev = phy_find_first(adapter->mdiobus);
+ if (!phydev)
+ goto return_error;
+
++ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ adapter->phy_mode);
+--
+2.27.0
+
--- /dev/null
+From be0b2ded3ebbcc0aa058cc8f603450a5ea125739 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Nov 2020 15:38:28 -0500
+Subject: lan743x: fix "BUG: invalid wait context" when setting rx mode
+
+From: Sven Van Asbroeck <thesven73@gmail.com>
+
+[ Upstream commit 2b52a4b65bc8f14520fe6e996ea7fb3f7e400761 ]
+
+In the net core, the struct net_device_ops -> ndo_set_rx_mode()
+callback is called with the dev->addr_list_lock spinlock held.
+
+However, this driver's ndo_set_rx_mode callback eventually calls
+lan743x_dp_write(), which acquires a mutex. Mutex acquisition
+may sleep, and this is not allowed when holding a spinlock.
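+
+A minimal sketch of the invalid nesting (hypothetical driver callback, not
+the lan743x code): the core takes the address-list spinlock before invoking
+the callback, so any sleeping lock acquired inside it is a bug.
+
+    /* net core: netif_addr_lock_bh(dev) is held around this callback */
+    static void example_set_rx_mode(struct net_device *dev)
+    {
+            struct example_priv *priv = netdev_priv(dev);
+
+            mutex_lock(&priv->reg_lock);   /* may sleep: invalid wait context */
+            /* ... program hardware address filters ... */
+            mutex_unlock(&priv->reg_lock);
+    }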
+
+Fix by removing the dp_lock mutex entirely. Its purpose is to
+prevent concurrent accesses to the data port. No concurrent
+accesses are possible, because the dev->addr_list_lock
+spinlock in the core only lets through one thread at a time.
+
+Fixes: 23f0703c125b ("lan743x: Add main source files for new lan743x driver")
+Signed-off-by: Sven Van Asbroeck <thesven73@gmail.com>
+Link: https://lore.kernel.org/r/20201109203828.5115-1-TheSven73@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 12 +++---------
+ drivers/net/ethernet/microchip/lan743x_main.h | 3 ---
+ 2 files changed, 3 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index be58a941965b1..6c25c7c8b7cf8 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -675,14 +675,12 @@ clean_up:
+ static int lan743x_dp_write(struct lan743x_adapter *adapter,
+ u32 select, u32 addr, u32 length, u32 *buf)
+ {
+- int ret = -EIO;
+ u32 dp_sel;
+ int i;
+
+- mutex_lock(&adapter->dp_lock);
+ if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
+ 1, 40, 100, 100))
+- goto unlock;
++ return -EIO;
+ dp_sel = lan743x_csr_read(adapter, DP_SEL);
+ dp_sel &= ~DP_SEL_MASK_;
+ dp_sel |= select;
+@@ -694,13 +692,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
+ lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
+ if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
+ 1, 40, 100, 100))
+- goto unlock;
++ return -EIO;
+ }
+- ret = 0;
+
+-unlock:
+- mutex_unlock(&adapter->dp_lock);
+- return ret;
++ return 0;
+ }
+
+ static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
+@@ -2737,7 +2732,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+
+ adapter->intr.irq = adapter->pdev->irq;
+ lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
+- mutex_init(&adapter->dp_lock);
+
+ ret = lan743x_gpio_init(adapter);
+ if (ret)
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index c61a404113179..a536f4a4994df 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -712,9 +712,6 @@ struct lan743x_adapter {
+ struct lan743x_csr csr;
+ struct lan743x_intr intr;
+
+- /* lock, used to prevent concurrent access to data port */
+- struct mutex dp_lock;
+-
+ struct lan743x_gpio gpio;
+ struct lan743x_ptp ptp;
+
+--
+2.27.0
+
--- /dev/null
+From 32c49bd0ce6764962eed67cb86f061c0a1ade9fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Nov 2020 10:25:13 -0500
+Subject: lan743x: fix use of uninitialized variable
+
+From: Sven Van Asbroeck <thesven73@gmail.com>
+
+[ Upstream commit edbc21113bde13ca3d06eec24b621b1f628583dd ]
+
+When no devicetree is present, the driver will use an
+uninitialized variable.
+
+Fix by initializing this variable.
+
+Fixes: 902a66e08cea ("lan743x: correctly handle chips with internal PHY")
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Sven Van Asbroeck <thesven73@gmail.com>
+Link: https://lore.kernel.org/r/20201112152513.1941-1-TheSven73@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 6c25c7c8b7cf8..bc368136bccc6 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1015,8 +1015,8 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
+ static int lan743x_phy_open(struct lan743x_adapter *adapter)
+ {
+ struct lan743x_phy *phy = &adapter->phy;
++ struct phy_device *phydev = NULL;
+ struct device_node *phynode;
+- struct phy_device *phydev;
+ struct net_device *netdev;
+ int ret = -EIO;
+
+--
+2.27.0
+
--- /dev/null
+From 13b2bdd63249d7680bc5dde3fa7c15045cf0ca54 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Oct 2020 15:37:07 -0700
+Subject: libbpf, hashmap: Fix undefined behavior in hash_bits
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit 7a078d2d18801bba7bde7337a823d7342299acf7 ]
+
+If bits is 0, the case when the map is empty, then the >> is by the full
+width of the register, which is undefined behavior - on x86 it happens to
+behave like a shift by 0.
+
+Fix by handling the 0 case explicitly and guarding calls to hash_bits for
+empty maps in hashmap__for_each_key_entry and hashmap__for_each_entry_safe.
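+
+A small standalone illustration of the guard (assuming a 64-bit size_t, not
+the exact libbpf source):
+
+    #include <stddef.h>
+
+    static inline size_t hash_bits_fixed(size_t h, int bits)
+    {
+            if (bits == 0)  /* empty map: shifting by the full width is UB */
+                    return 0;
+            return (h * 11400714819323198485llu) >> (64 - bits);
+    }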
+
+Fixes: e3b924224028 ("libbpf: add resizable non-thread safe internal hashmap")
+Suggested-by: Andrii Nakryiko <andriin@fb.com>,
+Signed-off-by: Ian Rogers <irogers@google.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20201029223707.494059-1-irogers@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/lib/bpf/hashmap.h | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
+index e0af36b0e5d83..6a3c3d8bb4ab8 100644
+--- a/tools/lib/bpf/hashmap.h
++++ b/tools/lib/bpf/hashmap.h
+@@ -15,6 +15,9 @@
+ static inline size_t hash_bits(size_t h, int bits)
+ {
+ /* shuffle bits and return requested number of upper bits */
++ if (bits == 0)
++ return 0;
++
+ #if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
+ /* LP64 case */
+ return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
+@@ -162,17 +165,17 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value);
+ * @key: key to iterate entries for
+ */
+ #define hashmap__for_each_key_entry(map, cur, _key) \
+- for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+- map->cap_bits); \
+- map->buckets ? map->buckets[bkt] : NULL; }); \
++ for (cur = map->buckets \
++ ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
++ : NULL; \
+ cur; \
+ cur = cur->next) \
+ if (map->equal_fn(cur->key, (_key), map->ctx))
+
+ #define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
+- for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+- map->cap_bits); \
+- cur = map->buckets ? map->buckets[bkt] : NULL; }); \
++ for (cur = map->buckets \
++ ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
++ : NULL; \
+ cur && ({ tmp = cur->next; true; }); \
+ cur = tmp) \
+ if (map->equal_fn(cur->key, (_key), map->ctx))
+--
+2.27.0
+
--- /dev/null
+From 73439214e1b90b7135fb0f9e825591f5c6164217 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Nov 2020 22:52:13 -0800
+Subject: mm: memcontrol: fix missing wakeup polling thread
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+[ Upstream commit 8b21ca0218d29cc6bb7028125c7e5a10dfb4730c ]
+
+When we poll swap.events, we can miss being woken up when a swap event
+occurs, because the swap events file is never notified.
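+
+For illustration, userspace typically waits for these notifications by
+polling the cgroup file for POLLPRI (a sketch with error handling omitted;
+the path is only an example):
+
+    #include <fcntl.h>
+    #include <poll.h>
+    #include <unistd.h>
+
+    void wait_for_swap_event(void)
+    {
+            struct pollfd pfd = {
+                    .fd = open("/sys/fs/cgroup/foo/memory.swap.events", O_RDONLY),
+                    .events = POLLPRI,
+            };
+
+            poll(&pfd, 1, -1);  /* wakes only if cgroup_file_notify() was called */
+            close(pfd.fd);
+    }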
+
+Fixes: f3a53a3a1e5b ("mm, memcontrol: implement memory.swap.events")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Yafang Shao <laoar.shao@gmail.com>
+Cc: Chris Down <chris@chrisdown.name>
+Cc: Tejun Heo <tj@kernel.org>
+Link: https://lkml.kernel.org/r/20201105161936.98312-1-songmuchun@bytedance.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/memcontrol.h | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index d0b036123c6ab..fa635207fe96d 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -897,12 +897,19 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
+ static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
+ {
++ bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
++ event == MEMCG_SWAP_FAIL;
++
+ atomic_long_inc(&memcg->memory_events_local[event]);
+- cgroup_file_notify(&memcg->events_local_file);
++ if (!swap_event)
++ cgroup_file_notify(&memcg->events_local_file);
+
+ do {
+ atomic_long_inc(&memcg->memory_events[event]);
+- cgroup_file_notify(&memcg->events_file);
++ if (swap_event)
++ cgroup_file_notify(&memcg->swap_events_file);
++ else
++ cgroup_file_notify(&memcg->events_file);
+
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ break;
+--
+2.27.0
+
--- /dev/null
+From e0bfecf82bee9cd8311a1383933315cd98200fa1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Nov 2020 18:30:59 +0100
+Subject: nbd: fix a block_device refcount leak in nbd_release
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 2bd645b2d3f0bacadaa6037f067538e1cd4e42ef ]
+
+bdget_disk needs to be paired with bdput to not leak a reference
+on the block device inode.
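+
+The general pairing, sketched (not the nbd code): every successful
+bdget_disk() must be matched by a bdput() once the block_device is no
+longer needed.
+
+    struct block_device *bdev = bdget_disk(disk, 0);
+
+    if (!bdev)
+            return;
+    /* ... inspect bdev->bd_openers, etc. ... */
+    bdput(bdev);    /* drop the inode reference taken by bdget_disk() */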
+
+Fixes: 08ba91ee6e2c ("nbd: Add the nbd NBD_DISCONNECT_ON_CLOSE config flag.")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/nbd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index d76fca629c143..36c46fe078556 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1517,6 +1517,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
+ if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+ bdev->bd_openers == 0)
+ nbd_disconnect_and_put(nbd);
++ bdput(bdev);
+
+ nbd_config_put(nbd);
+ nbd_put(nbd);
+--
+2.27.0
+
--- /dev/null
+From 7c2a4f371f55930d03557b0b557085d7fb7946dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Nov 2020 12:41:28 +0200
+Subject: net/mlx5: E-switch, Avoid extack error log for disabled vport
+
+From: Parav Pandit <parav@nvidia.com>
+
+[ Upstream commit ae35859445607f7f18dd4f332749219cd636ed59 ]
+
+When an E-switch vport is disabled, querying its hardware address is
+unsupported.
+Avoid setting an extack error log message in such a case.
+
+Fixes: f099fde16db3 ("net/mlx5: E-switch, Support querying port function mac address")
+Signed-off-by: Parav Pandit <parav@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 6e6a9a5639928..e8e6294c7ccae 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1902,8 +1902,6 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
+ ether_addr_copy(hw_addr, vport->info.mac);
+ *hw_addr_len = ETH_ALEN;
+ err = 0;
+- } else {
+- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ }
+ mutex_unlock(&esw->state_lock);
+ return err;
+--
+2.27.0
+
--- /dev/null
+From d72dff24d2e0675fa9db47fd32d9990e7caf0db9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Oct 2020 08:42:49 +0300
+Subject: net/mlx5: Fix deletion of duplicate rules
+
+From: Maor Gottlieb <maorg@nvidia.com>
+
+[ Upstream commit 465e7baab6d93b399344f5868f84c177ab5cd16f ]
+
+When a rule is duplicated, the refcount of the rule is increased so only
+the second deletion of the rule should cause destruction of the FTE.
+Currently, the FTE will be destroyed on the first deletion of a rule, since
+the modify_mask will be 0.
+Fix it by destroying the FTE only once all of the rules (the FTE's children)
+have been removed.
+
+Fixes: 718ce4d601db ("net/mlx5: Consolidate update FTE for all removal changes")
+Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 75fa44eee434d..d4755d61dd740 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1994,10 +1994,11 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
+ down_write_ref_node(&fte->node, false);
+ for (i = handle->num_rules - 1; i >= 0; i--)
+ tree_remove_node(&handle->rule[i]->node, true);
+- if (fte->modify_mask && fte->dests_size) {
+- modify_fte(fte);
++ if (fte->dests_size) {
++ if (fte->modify_mask)
++ modify_fte(fte);
+ up_write_ref_node(&fte->node, false);
+- } else {
++ } else if (list_empty(&fte->node.children)) {
+ del_hw_fte(&fte->node);
+ /* Avoid double call to del_hw_fte */
+ fte->node.del_hw_func = NULL;
+--
+2.27.0
+
--- /dev/null
+From ce1c45688e165f7e96a952088325dacd3e763507 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Oct 2020 12:49:51 +0300
+Subject: net/mlx5e: Fix incorrect access of RCU-protected xdp_prog
+
+From: Maxim Mikityanskiy <maximmi@mellanox.com>
+
+[ Upstream commit 1a50cf9a67ff2241c2949d30bc11c8dd4280eef8 ]
+
+rq->xdp_prog is RCU-protected and should be accessed only with
+rcu_access_pointer for the NULL check in mlx5e_poll_rx_cq.
+
+rq->xdp_prog may change on the fly only from one non-NULL value to
+another non-NULL value, so the checks in mlx5e_xdp_handle and
+mlx5e_poll_rx_cq will have the same result during one NAPI cycle,
+meaning that no additional synchronization is needed.
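+
+As a general illustration (not the full driver code): rcu_access_pointer()
+is the right primitive when the pointer value is only tested and never
+dereferenced, while rcu_dereference() is required when the pointee is used.
+
+    struct bpf_prog *prog;
+
+    rcu_read_lock();
+    prog = rcu_dereference(rq->xdp_prog);   /* pointer will be dereferenced */
+    if (prog)
+            bpf_prog_run_xdp(prog, &xdp);   /* xdp: the frame being handled */
+    rcu_read_unlock();
+
+    /* NULL check only, no dereference, no RCU read-side section needed */
+    if (rcu_access_pointer(rq->xdp_prog))
+            mlx5e_xdp_rx_poll_complete(rq);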
+
+Fixes: fe45386a2082 ("net/mlx5e: Use RCU to protect rq->xdp_prog")
+Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 64c8ac5eabf6a..a0a4398408b85 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1566,7 +1566,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+ } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+
+ out:
+- if (rq->xdp_prog)
++ if (rcu_access_pointer(rq->xdp_prog))
+ mlx5e_xdp_rx_poll_complete(rq);
+
+ mlx5_cqwq_update_db_record(cqwq);
+--
+2.27.0
+
--- /dev/null
+From 9a6ee78661655dfac3f772a0516d34b6c2cfb075 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Sep 2020 16:31:11 +0300
+Subject: net/mlx5e: Fix modify header actions memory leak
+
+From: Maor Dickman <maord@nvidia.com>
+
+[ Upstream commit e68e28b4a9d71261e3f8fd05a72d6cf0b443a493 ]
+
+Modify-header actions are allocated while the tc actions are parsed, but are
+only freed during flow creation; however, on the error path the allocated
+memory is never freed.
+
+Fix this by calling dealloc_mod_hdr_actions in the __mlx5e_add_fdb_flow
+and mlx5e_add_nic_flow error paths.
+
+Fixes: d7e75a325cb2 ("net/mlx5e: Add offloading of E-Switch TC pedit (header re-write) actions")
+Fixes: 2f4fe4cab073 ("net/mlx5e: Add offloading of NIC TC pedit (header re-write) actions")
+Signed-off-by: Maor Dickman <maord@nvidia.com>
+Reviewed-by: Paul Blakey <paulb@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 1c93f92d9210a..44947b054dc4c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -4430,6 +4430,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ return flow;
+
+ err_free:
++ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_flow_put(priv, flow);
+ out:
+ return ERR_PTR(err);
+@@ -4564,6 +4565,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
+ return 0;
+
+ err_free:
++ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_flow_put(priv, flow);
+ kvfree(parse_attr);
+ out:
+--
+2.27.0
+
--- /dev/null
+From a9458dc93c4f281c891d047b9d393ee421de6962 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Sep 2020 12:58:44 +0300
+Subject: net/mlx5e: Fix VXLAN synchronization after function reload
+
+From: Aya Levin <ayal@nvidia.com>
+
+[ Upstream commit c5eb51adf06b2644fa28d4af886bfdcc53e288da ]
+
+During driver reload, a firmware tear-down is performed, which results in
+the firmware losing the configured VXLAN ports. These ports are still
+available in the driver's database. Fix this by cleaning up the driver's
+VXLAN database in the nic unload flow, before the firmware tear-down. With
+that, minimize mlx5_vxlan_destroy() to remove only what was added in
+mlx5_vxlan_create() and warn on leftover UDP ports.
+
+Fixes: 18a2b7f969c9 ("net/mlx5: convert to new udp_tunnel infrastructure")
+Signed-off-by: Aya Levin <ayal@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/en_main.c | 1 +
+ .../ethernet/mellanox/mlx5/core/lib/vxlan.c | 23 ++++++++++++++-----
+ .../ethernet/mellanox/mlx5/core/lib/vxlan.h | 2 ++
+ 3 files changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 42ec28e298348..f399973a44eb0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5226,6 +5226,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
+
+ mlx5e_disable_async_events(priv);
+ mlx5_lag_remove(mdev);
++ mlx5_vxlan_reset_to_default(mdev->vxlan);
+ }
+
+ int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+index 3315afe2f8dce..38084400ee8fa 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+@@ -167,6 +167,17 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
+ }
+
+ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
++{
++ if (!mlx5_vxlan_allowed(vxlan))
++ return;
++
++ mlx5_vxlan_del_port(vxlan, IANA_VXLAN_UDP_PORT);
++ WARN_ON(!hash_empty(vxlan->htable));
++
++ kfree(vxlan);
++}
++
++void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan)
+ {
+ struct mlx5_vxlan_port *vxlanp;
+ struct hlist_node *tmp;
+@@ -175,12 +186,12 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
+ if (!mlx5_vxlan_allowed(vxlan))
+ return;
+
+- /* Lockless since we are the only hash table consumers*/
+ hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
+- hash_del(&vxlanp->hlist);
+- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
+- kfree(vxlanp);
++ /* Don't delete default UDP port added by the HW.
++ * Remove only user configured ports
++ */
++ if (vxlanp->udp_port == IANA_VXLAN_UDP_PORT)
++ continue;
++ mlx5_vxlan_del_port(vxlan, vxlanp->udp_port);
+ }
+-
+- kfree(vxlan);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+index ec766529f49b6..34ef662da35ed 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+@@ -56,6 +56,7 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
+ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
+ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
+ bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
++void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan);
+ #else
+ static inline struct mlx5_vxlan*
+ mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
+@@ -63,6 +64,7 @@ static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
+ static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+ static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+ static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; }
++static inline void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) { return; }
+ #endif
+
+ #endif /* __MLX5_VXLAN_H__ */
+--
+2.27.0
+
--- /dev/null
+From 7c3a41541ed653c61bda7557d8c31c60cf2416dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 31 Aug 2020 16:17:29 +0300
+Subject: net/mlx5e: Protect encap route dev from concurrent release
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit 78c906e430b13d30a8cfbdef4ccbbe1686841a9e ]
+
+In the functions mlx5e_route_lookup_ipv{4|6}() route_dev can be an arbitrary
+net device and not necessarily an mlx5 eswitch port representor. As such, in
+order to ensure that route_dev is not destroyed concurrently, the code needs
+to either explicitly take a reference to the device before releasing the
+reference to the rtable instance, or ensure that the caller holds the rtnl
+lock. The first approach is chosen as the fix, since the rtnl lock dependency
+was intentionally removed from the mlx5 TC layer.
+
+To prevent unprotected usage of route_dev in the encap code, take a reference
+to the device before releasing rt. Don't save a direct pointer to the device
+in the mlx5_encap_entry structure; use the ifindex instead. Modify users of
+the route_dev pointer to properly obtain the net device instance from its
+ifindex.
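+
+A condensed sketch of the pattern (not the full driver flow): hold the device
+across the route release, remember only the ifindex, and look the device up
+again when it is needed later.
+
+    struct net_device *route_dev = rt->dst.dev;
+
+    dev_hold(route_dev);            /* keep it alive past ip_rt_put() */
+    ip_rt_put(rt);
+    e->route_dev_ifindex = route_dev->ifindex;
+    /* ... */
+    dev_put(route_dev);
+
+    /* later, under RTNL or RCU as appropriate */
+    route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
+    if (route_dev)
+            ether_addr_copy(eth->h_source, route_dev->dev_addr);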
+
+Fixes: 61086f391044 ("net/mlx5e: Protect encap hash table with mutex")
+Fixes: 6707f74be862 ("net/mlx5e: Update hw flows when encap source mac changed")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/mellanox/mlx5/core/en/rep/tc.c | 6 +-
+ .../ethernet/mellanox/mlx5/core/en/tc_tun.c | 72 ++++++++++++-------
+ .../net/ethernet/mellanox/mlx5/core/en_rep.h | 2 +-
+ 3 files changed, 52 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index 79cc42d88eec6..38ea249159f60 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -107,12 +107,16 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ mlx5e_tc_encap_flows_del(priv, e, &flow_list);
+
+ if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
++ struct net_device *route_dev;
++
+ ether_addr_copy(e->h_dest, ha);
+ ether_addr_copy(eth->h_dest, ha);
+ /* Update the encap source mac, in case that we delete
+ * the flows when encap source mac changed.
+ */
+- ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
++ route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
++ if (route_dev)
++ ether_addr_copy(eth->h_source, route_dev->dev_addr);
+
+ mlx5e_tc_encap_flows_add(priv, e, &flow_list);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index 7cce85faa16fa..90930e54b6f28 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -77,13 +77,13 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
+ return 0;
+ }
+
+-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
+- struct net_device *mirred_dev,
+- struct net_device **out_dev,
+- struct net_device **route_dev,
+- struct flowi4 *fl4,
+- struct neighbour **out_n,
+- u8 *out_ttl)
++static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
++ struct net_device *mirred_dev,
++ struct net_device **out_dev,
++ struct net_device **route_dev,
++ struct flowi4 *fl4,
++ struct neighbour **out_n,
++ u8 *out_ttl)
+ {
+ struct neighbour *n;
+ struct rtable *rt;
+@@ -117,18 +117,28 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
+ ip_rt_put(rt);
+ return ret;
+ }
++ dev_hold(*route_dev);
+
+ if (!(*out_ttl))
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
+ n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
+ ip_rt_put(rt);
+- if (!n)
++ if (!n) {
++ dev_put(*route_dev);
+ return -ENOMEM;
++ }
+
+ *out_n = n;
+ return 0;
+ }
+
++static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
++ struct neighbour *n)
++{
++ neigh_release(n);
++ dev_put(route_dev);
++}
++
+ static const char *mlx5e_netdev_kind(struct net_device *dev)
+ {
+ if (dev->rtnl_link_ops)
+@@ -193,8 +203,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ fl4.saddr = tun_key->u.ipv4.src;
+ ttl = tun_key->ttl;
+
+- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
+- &fl4, &n, &ttl);
++ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
++ &fl4, &n, &ttl);
+ if (err)
+ return err;
+
+@@ -223,7 +233,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+- e->route_dev = route_dev;
++ e->route_dev_ifindex = route_dev->ifindex;
+
+ /* It's important to add the neigh to the hash table before checking
+ * the neigh validity state. So if we'll get a notification, in case the
+@@ -278,7 +288,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+- neigh_release(n);
++ mlx5e_route_lookup_ipv4_put(route_dev, n);
+ return err;
+
+ destroy_neigh_entry:
+@@ -286,18 +296,18 @@ destroy_neigh_entry:
+ free_encap:
+ kfree(encap_header);
+ release_neigh:
+- neigh_release(n);
++ mlx5e_route_lookup_ipv4_put(route_dev, n);
+ return err;
+ }
+
+ #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+- struct net_device *mirred_dev,
+- struct net_device **out_dev,
+- struct net_device **route_dev,
+- struct flowi6 *fl6,
+- struct neighbour **out_n,
+- u8 *out_ttl)
++static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
++ struct net_device *mirred_dev,
++ struct net_device **out_dev,
++ struct net_device **route_dev,
++ struct flowi6 *fl6,
++ struct neighbour **out_n,
++ u8 *out_ttl)
+ {
+ struct dst_entry *dst;
+ struct neighbour *n;
+@@ -318,15 +328,25 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+ return ret;
+ }
+
++ dev_hold(*route_dev);
+ n = dst_neigh_lookup(dst, &fl6->daddr);
+ dst_release(dst);
+- if (!n)
++ if (!n) {
++ dev_put(*route_dev);
+ return -ENOMEM;
++ }
+
+ *out_n = n;
+ return 0;
+ }
+
++static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
++ struct neighbour *n)
++{
++ neigh_release(n);
++ dev_put(route_dev);
++}
++
+ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+@@ -348,8 +368,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ fl6.daddr = tun_key->u.ipv6.dst;
+ fl6.saddr = tun_key->u.ipv6.src;
+
+- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
+- &fl6, &n, &ttl);
++ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
++ &fl6, &n, &ttl);
+ if (err)
+ return err;
+
+@@ -378,7 +398,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+- e->route_dev = route_dev;
++ e->route_dev_ifindex = route_dev->ifindex;
+
+ /* It's importent to add the neigh to the hash table before checking
+ * the neigh validity state. So if we'll get a notification, in case the
+@@ -433,7 +453,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+- neigh_release(n);
++ mlx5e_route_lookup_ipv6_put(route_dev, n);
+ return err;
+
+ destroy_neigh_entry:
+@@ -441,7 +461,7 @@ destroy_neigh_entry:
+ free_encap:
+ kfree(encap_header);
+ release_neigh:
+- neigh_release(n);
++ mlx5e_route_lookup_ipv6_put(route_dev, n);
+ return err;
+ }
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+index 0d1562e20118c..963a6d98840ac 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+@@ -187,7 +187,7 @@ struct mlx5e_encap_entry {
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+
+ struct net_device *out_dev;
+- struct net_device *route_dev;
++ int route_dev_ifindex;
+ struct mlx5e_tc_tunnel *tunnel;
+ int reformat_type;
+ u8 flags;
+--
+2.27.0
+
--- /dev/null
+From 17408b2aac6f84f45282473c0ae872d80cbe8323 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Oct 2020 11:34:03 +0300
+Subject: net/mlx5e: Use spin_lock_bh for async_icosq_lock
+
+From: Maxim Mikityanskiy <maximmi@mellanox.com>
+
+[ Upstream commit f42139ba49791ab6b12443c60044872705b74a1e ]
+
+async_icosq_lock may be taken from softirq and non-softirq contexts. It
+requires protection with spin_lock_bh, otherwise a softirq may be
+triggered in the middle of the critical section, and it may deadlock if
+it tries to take the same lock. This patch fixes such a scenario by
+using spin_lock_bh to disable softirqs on that CPU while inside the
+critical section.
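+
+The pattern, in short (illustrative, condensed from the shape of the diff
+below):
+
+    /* With plain spin_lock(), softirqs stay enabled, so a softirq raised on
+     * this CPU can interrupt the critical section, try to take the same lock
+     * from softirq context and spin forever.
+     */
+    spin_lock_bh(&c->async_icosq_lock);     /* disables softirqs locally */
+    mlx5e_trigger_irq(&c->async_icosq);
+    spin_unlock_bh(&c->async_icosq_lock);   /* re-enables softirqs */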
+
+Fixes: 8d94b590f1e4 ("net/mlx5e: Turn XSK ICOSQ into a general asynchronous one")
+Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 4 ++--
+ .../net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 4 ++--
+ .../ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 14 +++++++-------
+ 3 files changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index 55e65a438de70..fcaeb30778bc7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -122,9 +122,9 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
+ set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+ /* TX queue is created active. */
+
+- spin_lock(&c->async_icosq_lock);
++ spin_lock_bh(&c->async_icosq_lock);
+ mlx5e_trigger_irq(&c->async_icosq);
+- spin_unlock(&c->async_icosq_lock);
++ spin_unlock_bh(&c->async_icosq_lock);
+ }
+
+ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+index 4d892f6cecb3e..4de70cee80c0a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+@@ -36,9 +36,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+ if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
+ return 0;
+
+- spin_lock(&c->async_icosq_lock);
++ spin_lock_bh(&c->async_icosq_lock);
+ mlx5e_trigger_irq(&c->async_icosq);
+- spin_unlock(&c->async_icosq_lock);
++ spin_unlock_bh(&c->async_icosq_lock);
+ }
+
+ return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+index 6bbfcf18107d2..979ff5658a3f7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+@@ -188,7 +188,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
+
+ err = 0;
+ sq = &c->async_icosq;
+- spin_lock(&c->async_icosq_lock);
++ spin_lock_bh(&c->async_icosq_lock);
+
+ cseg = post_static_params(sq, priv_rx);
+ if (IS_ERR(cseg))
+@@ -199,7 +199,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
+
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ unlock:
+- spin_unlock(&c->async_icosq_lock);
++ spin_unlock_bh(&c->async_icosq_lock);
+
+ return err;
+
+@@ -265,10 +265,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
+
+ BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
+
+- spin_lock(&sq->channel->async_icosq_lock);
++ spin_lock_bh(&sq->channel->async_icosq_lock);
+
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+- spin_unlock(&sq->channel->async_icosq_lock);
++ spin_unlock_bh(&sq->channel->async_icosq_lock);
+ err = -ENOSPC;
+ goto err_dma_unmap;
+ }
+@@ -299,7 +299,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
+ icosq_fill_wi(sq, pi, &wi);
+ sq->pc++;
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+- spin_unlock(&sq->channel->async_icosq_lock);
++ spin_unlock_bh(&sq->channel->async_icosq_lock);
+
+ return 0;
+
+@@ -360,7 +360,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
+ err = 0;
+
+ sq = &c->async_icosq;
+- spin_lock(&c->async_icosq_lock);
++ spin_lock_bh(&c->async_icosq_lock);
+
+ cseg = post_static_params(sq, priv_rx);
+ if (IS_ERR(cseg)) {
+@@ -372,7 +372,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ priv_rx->stats->tls_resync_res_ok++;
+ unlock:
+- spin_unlock(&c->async_icosq_lock);
++ spin_unlock_bh(&c->async_icosq_lock);
+
+ return err;
+ }
+--
+2.27.0
+
--- /dev/null
+From 6ed62dfe8d72a9abb984ca95da1318666c208b02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 Nov 2020 22:44:02 +0100
+Subject: net: phy: realtek: support paged operations on RTL8201CP
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit f3037c5a31b58a73b32a36e938ad0560085acadd ]
+
+The RTL8401-internal PHY identifies as RTL8201CP, and the init
+sequence in r8169, copied from vendor driver r8168, uses paged
+operations. Therefore set the same paged operation callbacks as
+for the other Realtek PHYs.
+
+Fixes: cdafdc29ef75 ("r8169: sync support for RTL8401 with vendor driver")
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Link: https://lore.kernel.org/r/69882f7a-ca2f-e0c7-ae83-c9b6937282cd@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/realtek.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 0f09609718007..81a614f903c4a 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -542,6 +542,8 @@ static struct phy_driver realtek_drvs[] = {
+ {
+ PHY_ID_MATCH_EXACT(0x00008201),
+ .name = "RTL8201CP Ethernet",
++ .read_page = rtl821x_read_page,
++ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc816),
+ .name = "RTL8201F Fast Ethernet",
+--
+2.27.0
+
--- /dev/null
+From 7d986b71c47e98ad6ff68443ff6db44afd7dcea1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 31 Oct 2020 12:44:25 -0400
+Subject: NFS: Fix listxattr receive buffer size
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 6c2190b3fcbc92cb79e39cc7e7531656b341e463 ]
+
+Certain NFSv4.2/RDMA tests fail with v5.9-rc1.
+
+rpcrdma_convert_kvec() runs off the end of the rl_segments array
+because rq_rcv_buf.tail[0].iov_len holds a very large positive
+value. The resultant kernel memory corruption is enough to crash
+the client system.
+
+Callers of rpc_prepare_reply_pages() must reserve an extra XDR_UNIT
+in the maximum decode size for a possible XDR pad of the contents
+of the xdr_buf's pages. That guarantees the allocated receive buffer
+will be large enough to accommodate the usual contents plus that XDR
+pad word.
+
+encode_op_hdr() cannot add that extra word. If it does,
+xdr_inline_pages() underruns the length of the tail iovec.
+
+Fixes: 3e1f02123fba ("NFSv4.2: add client side XDR handling for extended attributes")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs42xdr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index cc50085e151c5..d0ddf90c9be48 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -179,7 +179,7 @@
+ 1 + nfs4_xattr_name_maxsz + 1)
+ #define decode_setxattr_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
+ #define encode_listxattrs_maxsz (op_encode_hdr_maxsz + 2 + 1)
+-#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1)
++#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1 + 1)
+ #define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \
+ nfs4_xattr_name_maxsz)
+ #define decode_removexattr_maxsz (op_decode_hdr_maxsz + \
+@@ -504,7 +504,7 @@ static void encode_listxattrs(struct xdr_stream *xdr,
+ {
+ __be32 *p;
+
+- encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz + 1, hdr);
++ encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz, hdr);
+
+ p = reserve_space(xdr, 12);
+ if (unlikely(!p))
+--
+2.27.0
+
--- /dev/null
+From a2e97aa8a22b05b1c871320863abca53983eba0b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Oct 2020 15:07:16 -0400
+Subject: NFSD: fix missing refcount in nfsd4_copy by nfsd4_do_async_copy
+
+From: Dai Ngo <dai.ngo@oracle.com>
+
+[ Upstream commit 49a361327332c9221438397059067f9b205f690d ]
+
+Need to initialize nfsd4_copy's refcount to 1 to avoid a use-after-free
+warning when nfs4_put_copy is called from nfsd4_cb_offload_release.
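+
+The underlying rule, sketched with a hypothetical object (not the nfsd code):
+an object whose release path uses refcount_dec_and_test() must start life
+with its refcount set to 1.
+
+    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+    if (!obj)
+            return -ENOMEM;
+    refcount_set(&obj->refcount, 1);        /* balanced by the final put */
+    /* ... */
+    if (refcount_dec_and_test(&obj->refcount))      /* the "put" side */
+            kfree(obj);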
+
+Fixes: ce0887ac96d3 ("NFSD add nfs4 inter ssc to nfsd4_copy")
+Signed-off-by: Dai Ngo <dai.ngo@oracle.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4proc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 80effaa18b7b2..3ba17b5fc9286 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1486,6 +1486,7 @@ do_callback:
+ cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ if (!cb_copy)
+ goto out;
++ refcount_set(&cb_copy->refcount, 1);
+ memcpy(&cb_copy->cp_res, ©->cp_res, sizeof(copy->cp_res));
+ cb_copy->cp_clp = copy->cp_clp;
+ cb_copy->nfserr = copy->nfserr;
+--
+2.27.0
+
--- /dev/null
+From 0b4977be487eabbdecf6197ef023ded08055b663 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Oct 2020 15:07:15 -0400
+Subject: NFSD: Fix use-after-free warning when doing inter-server copy
+
+From: Dai Ngo <dai.ngo@oracle.com>
+
+[ Upstream commit 36e1e5ba90fb3fba6888fae26e4dfc28bf70aaf1 ]
+
+The source file's nfsd_file is not constructed the same way as other
+nfsd_files, i.e. via nfsd_file_alloc. nfsd_file_put should not be
+called to free the object; nfsd_file_put is not the inverse of
+kzalloc. Instead, kfree is called by nfsd4_do_async_copy when done.
+
+Fixes: ce0887ac96d3 ("NFSD add nfs4 inter ssc to nfsd4_copy")
+Signed-off-by: Dai Ngo <dai.ngo@oracle.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4proc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 84e10aef14175..80effaa18b7b2 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1299,7 +1299,7 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+ struct nfsd_file *dst)
+ {
+ nfs42_ssc_close(src->nf_file);
+- nfsd_file_put(src);
++ /* 'src' is freed by nfsd4_do_async_copy */
+ nfsd_file_put(dst);
+ mntput(ss_mnt);
+ }
+--
+2.27.0
+
--- /dev/null
+From dd6e2d179de3c89137d44672775665e01117e310 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Sep 2020 07:19:13 +0200
+Subject: nvme: factor out a nvme_configure_metadata helper
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit d4609ea8b3d3fb3423f35805843a82774cb4ef2f ]
+
+Factor out a helper from nvme_update_ns_info that configures the
+per-namespace metadata and PI settings. Also make sure the helpers
+clear the flags explicitly instead of all of ->features to allow for
+potentially reusing ->features for future non-metadata flags.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 78 ++++++++++++++++++++++++----------------
+ 1 file changed, 47 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 59040bab5d6fa..be0cec51f5e6d 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1946,6 +1946,50 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ return 0;
+ }
+
++static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
++{
++ struct nvme_ctrl *ctrl = ns->ctrl;
++
++ /*
++ * The PI implementation requires the metadata size to be equal to the
++ * t10 pi tuple size.
++ */
++ ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
++ if (ns->ms == sizeof(struct t10_pi_tuple))
++ ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
++ else
++ ns->pi_type = 0;
++
++ ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
++ if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
++ return 0;
++ if (ctrl->ops->flags & NVME_F_FABRICS) {
++ /*
++ * The NVMe over Fabrics specification only supports metadata as
++ * part of the extended data LBA. We rely on HCA/HBA support to
++ * remap the separate metadata buffer from the block layer.
++ */
++ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
++ return -EINVAL;
++ if (ctrl->max_integrity_segments)
++ ns->features |=
++ (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
++ } else {
++ /*
++ * For PCIe controllers, we can't easily remap the separate
++ * metadata buffer from the block layer and thus require a
++ * separate metadata buffer for block layer metadata/PI support.
++ * We allow extended LBAs for the passthrough interface, though.
++ */
++ if (id->flbas & NVME_NS_FLBAS_META_EXT)
++ ns->features |= NVME_NS_EXT_LBAS;
++ else
++ ns->features |= NVME_NS_METADATA_SUPPORTED;
++ }
++
++ return 0;
++}
++
+ static void nvme_update_disk_info(struct gendisk *disk,
+ struct nvme_ns *ns, struct nvme_id_ns *id)
+ {
+@@ -2096,37 +2140,9 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ return -ENODEV;
+ }
+
+- ns->features = 0;
+- ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+- /* the PI implementation requires metadata equal t10 pi tuple size */
+- if (ns->ms == sizeof(struct t10_pi_tuple))
+- ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+- else
+- ns->pi_type = 0;
+-
+- if (ns->ms) {
+- /*
+- * For PCIe only the separate metadata pointer is supported,
+- * as the block layer supplies metadata in a separate bio_vec
+- * chain. For Fabrics, only metadata as part of extended data
+- * LBA is supported on the wire per the Fabrics specification,
+- * but the HBA/HCA will do the remapping from the separate
+- * metadata buffers for us.
+- */
+- if (id->flbas & NVME_NS_FLBAS_META_EXT) {
+- ns->features |= NVME_NS_EXT_LBAS;
+- if ((ctrl->ops->flags & NVME_F_FABRICS) &&
+- (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) &&
+- ctrl->max_integrity_segments)
+- ns->features |= NVME_NS_METADATA_SUPPORTED;
+- } else {
+- if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS))
+- return -EINVAL;
+- if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
+- ns->features |= NVME_NS_METADATA_SUPPORTED;
+- }
+- }
+-
++ ret = nvme_configure_metadata(ns, id);
++ if (ret)
++ return ret;
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(disk, ns, id);
+ #ifdef CONFIG_NVME_MULTIPATH
+--
+2.27.0
+
--- /dev/null
+From 60381785be6b6018ea4171a7a7757dc2460bd17b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Nov 2020 02:57:34 -0800
+Subject: nvme: fix incorrect behavior when BLKROSET is called by the user
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit 65c5a055b0d567b7e7639d942c0605da9cc54c5e ]
+
+The offending commit breaks the BLKROSET ioctl because a device
+revalidation will blindly override the BLKROSET setting. Hence,
+we remove the disk rw setting in case NVME_NS_ATTR_RO is cleared
+by the controller.
+
+Fixes: 1293477f4f32 ("nvme: set gendisk read only based on nsattr")
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index b130696b00592..349fba056cb65 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2064,8 +2064,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
+
+ if (id->nsattr & NVME_NS_ATTR_RO)
+ set_disk_ro(disk, true);
+- else
+- set_disk_ro(disk, false);
+ }
+
+ static inline bool nvme_first_scan(struct gendisk *disk)
+--
+2.27.0
+
--- /dev/null
+From 58bb516bd006fc2315a04bce9222267c8ae4eb63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 Nov 2020 17:07:55 -0500
+Subject: nvme: freeze the queue over ->lba_shift updates
+
+[ Upstream commit f9d5f4579feafa721dba2f350fc064a1852c6f8c ]
+
+Ensure that there can't be any I/O in flight when we change the disk
+geometry in nvme_update_ns_info, most notably the LBA size, by lifting
+the queue freeze from nvme_update_disk_info into the caller.
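+
+The resulting shape of the update path, roughly (condensed from the diff
+below):
+
+    blk_mq_freeze_queue(ns->disk->queue);   /* wait out in-flight I/O */
+    ret = nvme_configure_metadata(ns, id);
+    if (ret)
+            goto out_unfreeze;
+    nvme_update_disk_info(disk, ns, id);    /* may change LBA size/capacity */
+    blk_mq_unfreeze_queue(ns->disk->queue);
+    return 0;
+
+    out_unfreeze:
+            blk_mq_unfreeze_queue(ns->disk->queue);
+            return ret;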
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index be0cec51f5e6d..b130696b00592 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2001,7 +2001,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
+ /* unsupported block size, set capacity to 0 later */
+ bs = (1 << 9);
+ }
+- blk_mq_freeze_queue(disk->queue);
++
+ blk_integrity_unregister(disk);
+
+ atomic_bs = phys_bs = bs;
+@@ -2066,8 +2066,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
+ set_disk_ro(disk, true);
+ else
+ set_disk_ro(disk, false);
+-
+- blk_mq_unfreeze_queue(disk->queue);
+ }
+
+ static inline bool nvme_first_scan(struct gendisk *disk)
+@@ -2114,6 +2112,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
+
++ blk_mq_freeze_queue(ns->disk->queue);
+ /*
+ * If identify namespace failed, use default 512 byte block size so
+ * block layer can use before failing read/write for 0 capacity.
+@@ -2131,29 +2130,38 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ dev_warn(ctrl->device,
+ "failed to add zoned namespace:%u ret:%d\n",
+ ns->head->ns_id, ret);
+- return ret;
++ goto out_unfreeze;
+ }
+ break;
+ default:
+ dev_warn(ctrl->device, "unknown csi:%u ns:%u\n",
+ ns->head->ids.csi, ns->head->ns_id);
+- return -ENODEV;
++ ret = -ENODEV;
++ goto out_unfreeze;
+ }
+
+ ret = nvme_configure_metadata(ns, id);
+ if (ret)
+- return ret;
++ goto out_unfreeze;
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(disk, ns, id);
++ blk_mq_unfreeze_queue(ns->disk->queue);
++
+ #ifdef CONFIG_NVME_MULTIPATH
+ if (ns->head->disk) {
++ blk_mq_freeze_queue(ns->head->disk->queue);
+ nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ nvme_mpath_update_disk_size(ns->head->disk);
++ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+ #endif
+ return 0;
++
++out_unfreeze:
++ blk_mq_unfreeze_queue(ns->disk->queue);
++ return ret;
+ }
+
+ static int _nvme_revalidate_disk(struct gendisk *disk)
+--
+2.27.0
+
--- /dev/null
+From 5560234426e0974ae75609b4fdafb24391add8fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Nov 2020 15:28:25 +1300
+Subject: of/address: Fix of_node memory leak in of_dma_is_coherent
+
+From: Evan Nimmo <evan.nimmo@alliedtelesis.co.nz>
+
+[ Upstream commit a5bea04fcc0b3c0aec71ee1fd58fd4ff7ee36177 ]
+
+Commit dabf6b36b83a ("of: Add OF_DMA_DEFAULT_COHERENT & select it on
+powerpc") added a check to of_dma_is_coherent which returns early
+if OF_DMA_DEFAULT_COHERENT is enabled. This results in the of_node_put()
+being skipped, causing a memory leak. Move the of_node_get() below this
+check so that we only take the node reference if OF_DMA_DEFAULT_COHERENT
+is not enabled.
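+
+The shape of the bug, condensed (not the full function): any early return
+between of_node_get() and of_node_put() leaks a reference, so the get is
+moved after the unconditional early return.
+
+    /* before: leaks the reference when the early return is taken */
+    node = of_node_get(np);
+    if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+            return true;            /* of_node_put() never runs */
+
+    /* after: only take the reference once we know we will walk the tree */
+    if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+            return true;
+    node = of_node_get(np);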
+
+Fixes: dabf6b36b83a ("of: Add OF_DMA_DEFAULT_COHERENT & select it on powerpc")
+Signed-off-by: Evan Nimmo <evan.nimmo@alliedtelesis.co.nz>
+Link: https://lore.kernel.org/r/20201110022825.30895-1-evan.nimmo@alliedtelesis.co.nz
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/of/address.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index da4f7341323f2..37ac311843090 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -1043,11 +1043,13 @@ out:
+ */
+ bool of_dma_is_coherent(struct device_node *np)
+ {
+- struct device_node *node = of_node_get(np);
++ struct device_node *node;
+
+ if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+ return true;
+
++ node = of_node_get(np);
++
+ while (node) {
+ if (of_property_read_bool(node, "dma-coherent")) {
+ of_node_put(node);
+--
+2.27.0
+
--- /dev/null
+From bb9a46bfe66f54443c68aaed039ad1fedeaf7679 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Oct 2020 16:29:53 +0100
+Subject: perf: Fix event multiplexing for exclusive groups
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 2714c3962f304d031d5016c963c4b459337b0749 ]
+
+Commit 9e6302056f80 ("perf: Use hrtimers for event multiplexing")
+placed the hrtimer (re)start call in the wrong place. Instead of
+capturing all scheduling failures, it only considered the PMU failure.
+
+The result is that groups using perf_event_attr::exclusive are no
+longer rotated.
+
+Fixes: 9e6302056f80 ("perf: Use hrtimers for event multiplexing")
+Reported-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20201029162902.038667689@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index c245ccd426b71..a06ac60d346f1 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2597,7 +2597,6 @@ group_error:
+
+ error:
+ pmu->cancel_txn(pmu);
+- perf_mux_hrtimer_restart(cpuctx);
+ return -EAGAIN;
+ }
+
+@@ -3653,6 +3652,7 @@ static int merge_sched_in(struct perf_event *event, void *data)
+
+ *can_add_hw = 0;
+ ctx->rotate_necessary = 1;
++ perf_mux_hrtimer_restart(cpuctx);
+ }
+
+ return 0;
+--
+2.27.0
+
--- /dev/null
+From 5730fe2a2f96e5f2dcbfeaa0a7c42244eeeccd46 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Oct 2020 12:49:45 +0100
+Subject: perf: Fix get_recursion_context()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit ce0f17fc93f63ee91428af10b7b2ddef38cd19e5 ]
+
+One should use in_serving_softirq() to detect SoftIRQ context.
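+
+For context (a general note, not part of the original commit message):
+in_softirq() is also true when softirqs are merely disabled, e.g. inside a
+spin_lock_bh() section running in process context, whereas
+in_serving_softirq() is true only while a softirq handler is executing.
+
+    spin_lock_bh(&lock);                    /* process context, BHs disabled */
+    WARN_ON(!in_softirq());                 /* true here, but misleading     */
+    WARN_ON(in_serving_softirq());          /* correctly false here          */
+    spin_unlock_bh(&lock);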
+
+Fixes: 96f6d4444302 ("perf_counter: avoid recursion")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20201030151955.120572175@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/internal.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index fcbf5616a4411..402054e755f27 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -211,7 +211,7 @@ static inline int get_recursion_context(int *recursion)
+ rctx = 3;
+ else if (in_irq())
+ rctx = 2;
+- else if (in_softirq())
++ else if (in_serving_softirq())
+ rctx = 1;
+ else
+ rctx = 0;
+--
+2.27.0
+
--- /dev/null
+From 4de04202c7643c0bd577810e49bd482068dcf91d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Oct 2020 16:29:15 +0100
+Subject: perf: Simplify group_sched_in()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 251ff2d49347793d348babcff745289b11910e96 ]
+
+Collate the error paths. Code duplication only leads to divergence and
+extra bugs.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20201029162901.972161394@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 98a603098f23e..c245ccd426b71 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2565,11 +2565,8 @@ group_sched_in(struct perf_event *group_event,
+
+ pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
+
+- if (event_sched_in(group_event, cpuctx, ctx)) {
+- pmu->cancel_txn(pmu);
+- perf_mux_hrtimer_restart(cpuctx);
+- return -EAGAIN;
+- }
++ if (event_sched_in(group_event, cpuctx, ctx))
++ goto error;
+
+ /*
+ * Schedule in siblings as one group (if any):
+@@ -2598,10 +2595,9 @@ group_error:
+ }
+ event_sched_out(group_event, cpuctx, ctx);
+
++error:
+ pmu->cancel_txn(pmu);
+-
+ perf_mux_hrtimer_restart(cpuctx);
+-
+ return -EAGAIN;
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 03bc0d9ecdb368e5bcebd6eabfdb2d3e39b66af4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Oct 2020 13:54:50 +0800
+Subject: pinctrl: aspeed: Fix GPI only function problem.
+
+From: Billy Tsai <billy_tsai@aspeedtech.com>
+
+[ Upstream commit 9b92f5c51e9a41352d665f6f956bd95085a56a83 ]
+
+Some GPIO pins on Aspeed SoCs are input only, and the name prefix of
+these pins is just "GPI".
+This patch fine-tunes the GPIO check condition from "GPIO" to "GPI",
+which fixes the usage error of banks D and E in the AST2400/AST2500
+and banks T and U in the AST2600.
+
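+A standalone illustration of the prefix change: with the four-character
+"GPIO" comparison the input-only signals (e.g. "GPIT0") are not recognised
+as GPIO, while the three-character "GPI" prefix covers both families. The
+signal names below are examples, not an exhaustive list.
+
+```
+#include <stdio.h>
+#include <string.h>
+
+static int is_gpio_old(const char *signal) { return strncmp(signal, "GPIO", 4) == 0; }
+static int is_gpio_new(const char *signal) { return strncmp(signal, "GPI", 3) == 0; }
+
+int main(void)
+{
+	const char *signals[] = { "GPIOB1", "GPIT0", "SD1CLK" };
+
+	for (unsigned int i = 0; i < sizeof(signals) / sizeof(signals[0]); i++)
+		printf("%-7s old=%d new=%d\n", signals[i],
+		       is_gpio_old(signals[i]), is_gpio_new(signals[i]));
+	return 0;
+}
+```
+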
+Fixes: 4d3d0e4272d8 ("pinctrl: Add core support for Aspeed SoCs")
+Signed-off-by: Billy Tsai <billy_tsai@aspeedtech.com>
+Reviewed-by: Andrew Jeffery <andrew@aj.id.au>
+Link: https://lore.kernel.org/r/20201030055450.29613-1-billy_tsai@aspeedtech.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/aspeed/pinctrl-aspeed.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+index 3e6567355d97d..1d603732903fe 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+@@ -286,13 +286,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
+ {
+ /*
+- * The signal type is GPIO if the signal name has "GPIO" as a prefix.
++ * The signal type is GPIO if the signal name has "GPI" as a prefix.
+ * strncmp (rather than strcmp) is used to implement the prefix
+ * requirement.
+ *
+- * expr->signal might look like "GPIOT3" in the GPIO case.
++ * expr->signal might look like "GPIOB1" in the GPIO case.
++ * expr->signal might look like "GPIT0" in the GPI case.
+ */
+- return strncmp(expr->signal, "GPIO", 4) == 0;
++ return strncmp(expr->signal, "GPI", 3) == 0;
+ }
+
+ static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
+--
+2.27.0
+
--- /dev/null
+From 7e14b752bcf35bf0f909a9e679f3becb6217ba4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Oct 2020 13:46:37 +0300
+Subject: pinctrl: intel: Fix 2 kOhm bias which is 833 Ohm
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit dd26209bc56886cacdbd828571e54a6bca251e55 ]
+
+2 kOhm bias was never an option in Intel GPIO hardware, the available
+matrix is:
+
+ 000 none
+ 001 1 kOhm (if available)
+ 010 5 kOhm
+ 100 20 kOhm
+
+As is easy to see, the three resistors are gated separately, and by the
+usual parallel-circuit calculation their combinations yield results that
+are always strictly less than the smallest resistance involved. Hence,
+the additional values can be:
+
+ 011 ~833.3 Ohm
+ 101 ~952.4 Ohm
+ 110 ~4 kOhm
+ 111 ~800 Ohm
+
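+The additional values follow from ordinary parallel-resistor arithmetic
+over the three gated resistors; a small standalone check of the figures
+listed above:
+
+```
+#include <stdio.h>
+
+/* 1 / (1/r1 + 1/r2 [+ 1/r3]); pass 0 to leave a resistor out. */
+static double parallel(double r1, double r2, double r3)
+{
+	double g = 0.0;
+
+	if (r1) g += 1.0 / r1;
+	if (r2) g += 1.0 / r2;
+	if (r3) g += 1.0 / r3;
+	return 1.0 / g;
+}
+
+int main(void)
+{
+	printf("011 (1k || 5k)        = %7.1f Ohm\n", parallel(1000, 5000, 0));
+	printf("101 (1k || 20k)       = %7.1f Ohm\n", parallel(1000, 20000, 0));
+	printf("110 (5k || 20k)       = %7.1f Ohm\n", parallel(5000, 20000, 0));
+	printf("111 (1k || 5k || 20k) = %7.1f Ohm\n", parallel(1000, 5000, 20000));
+	return 0;
+}
+```
+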
+That said, convert the TERM definitions to bit masks to reflect the above.
+
+While at it, enable the same setting for the pull-down case.
+
+Fixes: 7981c0015af2 ("pinctrl: intel: Add Intel Sunrisepoint pin controller and GPIO support")
+Cc: Jamie McClymont <jamie@kwiius.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/intel/pinctrl-intel.c | 32 ++++++++++++++++++---------
+ 1 file changed, 22 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
+index b64997b303e0c..b738b28239bd4 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -62,10 +62,10 @@
+ #define PADCFG1_TERM_UP BIT(13)
+ #define PADCFG1_TERM_SHIFT 10
+ #define PADCFG1_TERM_MASK GENMASK(12, 10)
+-#define PADCFG1_TERM_20K 4
+-#define PADCFG1_TERM_2K 3
+-#define PADCFG1_TERM_5K 2
+-#define PADCFG1_TERM_1K 1
++#define PADCFG1_TERM_20K BIT(2)
++#define PADCFG1_TERM_5K BIT(1)
++#define PADCFG1_TERM_1K BIT(0)
++#define PADCFG1_TERM_833 (BIT(1) | BIT(0))
+
+ #define PADCFG2 0x008
+ #define PADCFG2_DEBEN BIT(0)
+@@ -549,12 +549,12 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ return -EINVAL;
+
+ switch (term) {
++ case PADCFG1_TERM_833:
++ *arg = 833;
++ break;
+ case PADCFG1_TERM_1K:
+ *arg = 1000;
+ break;
+- case PADCFG1_TERM_2K:
+- *arg = 2000;
+- break;
+ case PADCFG1_TERM_5K:
+ *arg = 5000;
+ break;
+@@ -570,6 +570,11 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ return -EINVAL;
+
+ switch (term) {
++ case PADCFG1_TERM_833:
++ if (!(community->features & PINCTRL_FEATURE_1K_PD))
++ return -EINVAL;
++ *arg = 833;
++ break;
+ case PADCFG1_TERM_1K:
+ if (!(community->features & PINCTRL_FEATURE_1K_PD))
+ return -EINVAL;
+@@ -685,12 +690,12 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ case 5000:
+ value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
+ break;
+- case 2000:
+- value |= PADCFG1_TERM_2K << PADCFG1_TERM_SHIFT;
+- break;
+ case 1000:
+ value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
+ break;
++ case 833:
++ value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
++ break;
+ default:
+ ret = -EINVAL;
+ }
+@@ -714,6 +719,13 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ }
+ value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
+ break;
++ case 833:
++ if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
++ ret = -EINVAL;
++ break;
++ }
++ value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
++ break;
+ default:
+ ret = -EINVAL;
+ }
+--
+2.27.0
+
--- /dev/null
+From 00440429c7e421b29f478bef176a526bed0e857a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Oct 2020 13:46:38 +0300
+Subject: pinctrl: intel: Set default bias in case no particular value given
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit f3c75e7a9349d1d33eb53ddc1b31640994969f73 ]
+
+When the GPIO library asks pin control to set the bias, it doesn't pass
+any value for it and the argument is treated as boolean (this is also
+true for ACPI GpioIo() / GpioInt() resources, by the way). Thus,
+individual drivers must behave well when they get a resistance value of
+1 Ohm, i.e. transform it into a sane default.
+
+In the case of Intel pin control hardware, 5 kOhm sounds plausible
+because on the one hand it's the minimum resistance present in all
+hardware generations, and at the same time it's high enough to minimize
+the leakage current (it will be only 200 uA with the above choice).
+
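+A minimal sketch of the normalisation this patch adds, reusing the term
+values quoted in the previous change purely as an illustration: a bias
+argument of 1 Ohm is a boolean-style "enable with the default" request and
+is mapped to 5 kOhm before the strength switch runs.
+
+```
+#include <stdio.h>
+
+static int pick_term_ohms(unsigned int arg)
+{
+	if (arg == 1)			/* no particular value given */
+		arg = 5000;
+
+	switch (arg) {
+	case 20000: return 20000;
+	case 5000:  return 5000;
+	case 1000:  return 1000;
+	case 833:   return 833;
+	default:    return -1;		/* the driver returns -EINVAL here */
+	}
+}
+
+int main(void)
+{
+	printf("arg=1     -> %d Ohm (default)\n", pick_term_ohms(1));
+	printf("arg=20000 -> %d Ohm\n", pick_term_ohms(20000));
+	printf("arg=2000  -> %d (rejected)\n", pick_term_ohms(2000));
+	return 0;
+}
+```
+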
+Fixes: e57725eabf87 ("pinctrl: intel: Add support for hardware debouncer")
+Reported-by: Jamie McClymont <jamie@kwiius.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/intel/pinctrl-intel.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
+index b738b28239bd4..31e7840bc5e25 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -683,6 +683,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+
+ value |= PADCFG1_TERM_UP;
+
++ /* Set default strength value in case none is given */
++ if (arg == 1)
++ arg = 5000;
++
+ switch (arg) {
+ case 20000:
+ value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+@@ -705,6 +709,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
+
++ /* Set default strength value in case none is given */
++ if (arg == 1)
++ arg = 5000;
++
+ switch (arg) {
+ case 20000:
+ value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+--
+2.27.0
+
--- /dev/null
+From 9675ee6daca1a5148fa14e5bcaded49584e34696 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Oct 2020 21:08:55 +0300
+Subject: pinctrl: mcp23s08: Use full chunk of memory for regmap configuration
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 2b12c13637134897ba320bd8906a8d918ee7069b ]
+
+It appears that the simplification of mcp23s08_spi_regmap_init()
+introduced a regression due to a wrong size calculation in the
+devm_kmemdup() call. It misses the fact that the config variable is
+already a pointer, so the sizeof() calculation is wrong and only 4 or
+8 bytes were copied.
+
+Fix the parameters to devm_kmemdup() to copy a full chunk of memory.
+
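+The underlying mistake is easy to reproduce in plain C: when config is a
+pointer, sizeof(config) is the size of the pointer (4 or 8 bytes), not the
+size of the structure it points to. The structure below is a made-up
+stand-in for the real regmap configuration.
+
+```
+#include <stdio.h>
+
+struct regmap_config_like {
+	int reg_bits;
+	int val_bits;
+	int max_register;
+	char rest[64];		/* stand-in for the remaining fields */
+};
+
+int main(void)
+{
+	static const struct regmap_config_like defaults = { 8, 8, 0x1f, { 0 } };
+	const struct regmap_config_like *config = &defaults;
+
+	printf("sizeof(config)  = %zu bytes (pointer -- what was copied)\n",
+	       sizeof(config));
+	printf("sizeof(*config) = %zu bytes (structure -- what should be copied)\n",
+	       sizeof(*config));
+	return 0;
+}
+```
+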
+Fixes: 0874758ecb2b ("pinctrl: mcp23s08: Refactor mcp23s08_spi_regmap_init()")
+Reported-by: Martin Hundebøll <martin@geanix.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Tested-by: Martin Hundebøll <martin@geanix.com>
+Link: https://lore.kernel.org/r/20201009180856.4738-1-andriy.shevchenko@linux.intel.com
+Tested-by: Jan Kundrát <jan.kundrat@cesnet.cz>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-mcp23s08_spi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+index 1f47a661b0a79..7c72cffe14127 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+@@ -119,7 +119,7 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
+ return -EINVAL;
+ }
+
+- copy = devm_kmemdup(dev, &config, sizeof(config), GFP_KERNEL);
++ copy = devm_kmemdup(dev, config, sizeof(*config), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+
+--
+2.27.0
+
--- /dev/null
+From 34a6f963a4a2b32d686e156b91f7fadfbf07e273 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Nov 2020 13:08:04 +0530
+Subject: pinctrl: qcom: Move clearing pending IRQ to .irq_request_resources
+ callback
+
+From: Maulik Shah <mkshah@codeaurora.org>
+
+[ Upstream commit 71266d9d39366c9b24b866d811b3facaf837f13f ]
+
+When GPIOs that are routed to the PDC are used as outputs they can still
+latch the IRQ pending at the GIC. As a result a spurious IRQ was handled
+when the client driver changed the direction to input to start using the
+GPIO as an IRQ.
+
+Currently such erroneously latched IRQs are cleared in the .irq_enable
+callback; however, if the driver keeps using the GPIO as an interrupt and
+invokes disable_irq() followed by enable_irq(), then every enable_irq()
+clears a previously latched interrupt.
+
+This can make edge IRQs go unseen after enable_irq() if they arrived
+after the driver invoked disable_irq() and were pending at the GIC.
+
+Move clearing of the erroneous IRQ to the .irq_request_resources callback,
+as this is the place where the GPIO direction is changed to input and it
+is locked as an IRQ.
+
+While at it, add a missing check to invoke msm_gpio_irq_clear_unmask()
+from the .irq_enable callback only when the GPIO is not routed to the PDC.
+
+Fixes: e35a6ae0eb3a ("pinctrl/msm: Setup GPIO chip in hierarchy")
+Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
+Link: https://lore.kernel.org/r/1604561884-10166-1-git-send-email-mkshah@codeaurora.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/qcom/pinctrl-msm.c | 32 ++++++++++++++++++------------
+ 1 file changed, 19 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index 1df232266f63a..1554f0275067e 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -815,21 +815,14 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+
+ static void msm_gpio_irq_enable(struct irq_data *d)
+ {
+- /*
+- * Clear the interrupt that may be pending before we enable
+- * the line.
+- * This is especially a problem with the GPIOs routed to the
+- * PDC. These GPIOs are direct-connect interrupts to the GIC.
+- * Disabling the interrupt line at the PDC does not prevent
+- * the interrupt from being latched at the GIC. The state at
+- * GIC needs to be cleared before enabling.
+- */
+- if (d->parent_data) {
+- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
++
++ if (d->parent_data)
+ irq_chip_enable_parent(d);
+- }
+
+- msm_gpio_irq_clear_unmask(d, true);
++ if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
++ msm_gpio_irq_clear_unmask(d, true);
+ }
+
+ static void msm_gpio_irq_disable(struct irq_data *d)
+@@ -1104,6 +1097,19 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
+ ret = -EINVAL;
+ goto out;
+ }
++
++ /*
++ * Clear the interrupt that may be pending before we enable
++ * the line.
++ * This is especially a problem with the GPIOs routed to the
++ * PDC. These GPIOs are direct-connect interrupts to the GIC.
++ * Disabling the interrupt line at the PDC does not prevent
++ * the interrupt from being latched at the GIC. The state at
++ * GIC needs to be cleared before enabling.
++ */
++ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
++ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
++
+ return 0;
+ out:
+ module_put(gc->owner);
+--
+2.27.0
+
--- /dev/null
+From ac8ca44a483f6b9f68ed5062f1b41d84f7a4e471 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Oct 2020 21:36:42 -0700
+Subject: pinctrl: qcom: sm8250: Specify PDC map
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+[ Upstream commit b41efeed507addecb92e83dd444d86c1fbe38ae0 ]
+
+Specify the PDC mapping for SM8250, so that GPIO interrupts are
+properly mapped to the wakeup IRQs of the PDC.
+
+Fixes: 4e3ec9e407ad ("pinctrl: qcom: Add sm8250 pinctrl driver.")
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Link: https://lore.kernel.org/r/20201028043642.1141723-1-bjorn.andersson@linaro.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/qcom/pinctrl-sm8250.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
+index 826df0d637eaa..af144e724bd9c 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
++++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
+@@ -1313,6 +1313,22 @@ static const struct msm_pingroup sm8250_groups[] = {
+ [183] = SDC_PINGROUP(sdc2_data, 0xb7000, 9, 0),
+ };
+
++static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
++ { 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
++ { 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
++ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
++ { 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
++ { 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
++ { 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
++ { 84, 98 }, { 86, 99 }, { 87, 100 }, { 88, 101 }, { 89, 102 },
++ { 92, 103 }, { 93, 104 }, { 100, 53 }, { 103, 47 }, { 104, 48 },
++ { 108, 49 }, { 109, 94 }, { 110, 95 }, { 111, 96 }, { 112, 55 },
++ { 113, 56 }, { 118, 50 }, { 121, 51 }, { 122, 57 }, { 123, 58 },
++ { 124, 45 }, { 126, 59 }, { 128, 76 }, { 129, 86 }, { 132, 93 },
++ { 133, 65 }, { 134, 66 }, { 136, 62 }, { 137, 63 }, { 138, 64 },
++ { 142, 60 }, { 143, 61 }
++};
++
+ static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
+ .pins = sm8250_pins,
+ .npins = ARRAY_SIZE(sm8250_pins),
+@@ -1323,6 +1339,8 @@ static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
+ .ngpios = 181,
+ .tiles = sm8250_tiles,
+ .ntiles = ARRAY_SIZE(sm8250_tiles),
++ .wakeirq_map = sm8250_pdc_map,
++ .nwakeirq_map = ARRAY_SIZE(sm8250_pdc_map),
+ };
+
+ static int sm8250_pinctrl_probe(struct platform_device *pdev)
+--
+2.27.0
+
--- /dev/null
+From 6e6eaf0933f97d7a79b314250cf2238eb4e9a6c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Nov 2020 18:14:47 +0100
+Subject: r8169: disable hw csum for short packets on all chip versions
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit 847f0a2bfd2fe16d6afa537816b313b71f32e139 ]
+
+RTL8125B has the same or a similar short-packet hw padding bug as
+RTL8168evl. The main workaround has been extended accordingly; however,
+we also have to disable hw checksumming for short packets on the
+affected new chip versions. Instead of checking for an affected chip
+version, let's simply disable hw checksumming for short packets in
+general.
+
+v2:
+- remove the version checks and disable short packet hw csum in general
+- reflect this in commit title and message
+
+Fixes: 0439297be951 ("r8169: add support for RTL8125B")
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Link: https://lore.kernel.org/r/7fbb35f0-e244-ef65-aa55-3872d7d38698@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 15 +++------------
+ 1 file changed, 3 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index ed918c12bc5e9..515d9116dfadf 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4325,18 +4325,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
+ rtl_chip_supports_csum_v2(tp))
+ features &= ~NETIF_F_ALL_TSO;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+- if (skb->len < ETH_ZLEN) {
+- switch (tp->mac_version) {
+- case RTL_GIGA_MAC_VER_11:
+- case RTL_GIGA_MAC_VER_12:
+- case RTL_GIGA_MAC_VER_17:
+- case RTL_GIGA_MAC_VER_34:
+- features &= ~NETIF_F_CSUM_MASK;
+- break;
+- default:
+- break;
+- }
+- }
++ /* work around hw bug on some chip versions */
++ if (skb->len < ETH_ZLEN)
++ features &= ~NETIF_F_CSUM_MASK;
+
+ if (transport_offset > TCPHO_MAX &&
+ rtl_chip_supports_csum_v2(tp))
+--
+2.27.0
+
--- /dev/null
+From b825ea2a4efa8779d5ebefdd13cbae4373030eb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Nov 2020 15:28:42 +0100
+Subject: r8169: fix potential skb double free in an error path
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit cc6528bc9a0c901c83b8220a2e2617f3354d6dd9 ]
+
+The caller of rtl8169_tso_csum_v2() frees the skb if false is returned.
+eth_skb_pad() internally frees the skb on error, which would result in a
+double free. Therefore use __skb_put_padto() directly and instruct it
+not to free the skb on error.
+
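+The bug is an ownership mismatch on the error path: the padding helper and
+its caller both believe they own the skb when padding fails. A hedged
+user-space analogue of the two conventions (all names here are
+illustrative, not the kernel API):
+
+```
+#include <stdio.h>
+#include <stdlib.h>
+
+struct buf { char *data; };
+
+/* Convention A (like eth_skb_pad): the helper frees the buffer on failure. */
+static int pad_and_free_on_error(struct buf *b)
+{
+	free(b->data);
+	b->data = NULL;
+	return -1;
+}
+
+/* Convention B (like __skb_put_padto(..., false)): ownership stays with the caller. */
+static int pad_keep_on_error(struct buf *b)
+{
+	(void)b;
+	return -1;
+}
+
+int main(void)
+{
+	struct buf b = { .data = malloc(32) };
+
+	/* The caller's unconditional error-path free is only safe with convention B. */
+	if (pad_keep_on_error(&b) < 0)
+		free(b.data);
+
+	/* With convention A the helper already freed; freeing again here would be
+	 * exactly the double free the patch removes. */
+	b.data = malloc(32);
+	if (pad_and_free_on_error(&b) < 0)
+		; /* nothing left for the caller to release */
+
+	puts("no double free when caller and helper agree on ownership");
+	return 0;
+}
+```
+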
+Fixes: b423e9ae49d7 ("r8169: fix offloaded tx checksum for small packets.")
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Link: https://lore.kernel.org/r/f7e68191-acff-9ded-4263-c016428a8762@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index c74d9c02a805f..ed918c12bc5e9 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4145,7 +4145,8 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
+ opts[1] |= transport_offset << TCPHO_SHIFT;
+ } else {
+ if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
+- return !eth_skb_pad(skb);
++ /* eth_skb_pad would free the skb on error */
++ return !__skb_put_padto(skb, ETH_ZLEN, false);
+ }
+
+ return true;
+--
+2.27.0
+
--- /dev/null
+From 9f60398e6262e7628758e1a7af00133a28db830f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Nov 2020 13:19:28 +0200
+Subject: selftest: fix flower terse dump tests
+
+From: Vlad Buslov <vlad@buslov.dev>
+
+[ Upstream commit 97adb13dc9ba08ecd4758bc59efc0205f5cbf377 ]
+
+The iproute2 tc classifier terse dump has been accepted upstream with a
+modified syntax. Update the tests accordingly.
+
+Signed-off-by: Vlad Buslov <vlad@buslov.dev>
+Fixes: e7534fd42a99 ("selftests: implement flower classifier terse dump tests")
+Link: https://lore.kernel.org/r/20201107111928.453534-1-vlad@buslov.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../testing/selftests/tc-testing/tc-tests/filters/tests.json | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+index bb543bf69d694..361235ad574be 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+@@ -100,7 +100,7 @@
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress flower dst_mac e4:11:22:11:4a:51 action drop",
+ "expExitCode": "0",
+- "verifyCmd": "$TC filter show terse dev $DEV2 ingress",
++ "verifyCmd": "$TC -br filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower.*handle",
+ "matchCount": "1",
+ "teardown": [
+@@ -119,7 +119,7 @@
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress flower dst_mac e4:11:22:11:4a:51 action drop",
+ "expExitCode": "0",
+- "verifyCmd": "$TC filter show terse dev $DEV2 ingress",
++ "verifyCmd": "$TC -br filter show dev $DEV2 ingress",
+ "matchPattern": " dst_mac e4:11:22:11:4a:51",
+ "matchCount": "0",
+ "teardown": [
+--
+2.27.0
+
tpm_tis-disable-interrupts-on-thinkpad-t490s.patch
spi-bcm2835-remove-use-of-uninitialized-gpio-flags-variable.patch
mfd-sprd-add-wakeup-capability-for-pmic-irq.patch
+pinctrl-intel-fix-2-kohm-bias-which-is-833-ohm.patch
+pinctrl-intel-set-default-bias-in-case-no-particular.patch
+gpio-aspeed-fix-ast2600-bank-properties.patch
+arm-9019-1-kprobes-avoid-fortify_panic-when-copying-.patch
+bpf-don-t-rely-on-gcc-__attribute__-optimize-to-disa.patch
+libbpf-hashmap-fix-undefined-behavior-in-hash_bits.patch
+pinctrl-mcp23s08-use-full-chunk-of-memory-for-regmap.patch
+pinctrl-aspeed-fix-gpi-only-function-problem.patch
+net-mlx5e-fix-modify-header-actions-memory-leak.patch
+net-mlx5e-protect-encap-route-dev-from-concurrent-re.patch
+net-mlx5e-use-spin_lock_bh-for-async_icosq_lock.patch
+net-mlx5-fix-deletion-of-duplicate-rules.patch
+net-mlx5-e-switch-avoid-extack-error-log-for-disable.patch
+net-mlx5e-fix-vxlan-synchronization-after-function-r.patch
+net-mlx5e-fix-incorrect-access-of-rcu-protected-xdp_.patch
+sunrpc-fix-general-protection-fault-in-trace_rpc_xdr.patch
+nfsd-fix-use-after-free-warning-when-doing-inter-ser.patch
+nfsd-fix-missing-refcount-in-nfsd4_copy-by-nfsd4_do_.patch
+tools-bpftool-fix-attaching-flow-dissector.patch
+bpf-zero-fill-re-used-per-cpu-map-element.patch
+r8169-fix-potential-skb-double-free-in-an-error-path.patch
+r8169-disable-hw-csum-for-short-packets-on-all-chip-.patch
+pinctrl-qcom-move-clearing-pending-irq-to-.irq_reque.patch
+pinctrl-qcom-sm8250-specify-pdc-map.patch
+nbd-fix-a-block_device-refcount-leak-in-nbd_release.patch
+selftest-fix-flower-terse-dump-tests.patch
+i40e-fix-mac-address-setting-for-a-vf-via-host-vm.patch
+igc-fix-returning-wrong-statistics.patch
+lan743x-correctly-handle-chips-with-internal-phy.patch
+net-phy-realtek-support-paged-operations-on-rtl8201c.patch
+xfs-fix-flags-argument-to-rmap-lookup-when-convertin.patch
+xfs-set-the-unwritten-bit-in-rmap-lookup-flags-in-xc.patch
+xfs-fix-rmap-key-and-record-comparison-functions.patch
+xfs-fix-brainos-in-the-refcount-scrubber-s-rmap-frag.patch
+lan743x-fix-bug-invalid-wait-context-when-setting-rx.patch
+xfs-fix-a-missing-unlock-on-error-in-xfs_fs_map_bloc.patch
+of-address-fix-of_node-memory-leak-in-of_dma_is_cohe.patch
+ch_ktls-update-cheksum-information.patch
+ch_ktls-tcb-update-fails-sometimes.patch
+cosa-add-missing-kfree-in-error-path-of-cosa_write.patch
+hwmon-applesmc-re-work-smc-comms.patch
+nfs-fix-listxattr-receive-buffer-size.patch
+vrf-fix-fast-path-output-packet-handling-with-async-.patch
+lan743x-fix-use-of-uninitialized-variable.patch
+arm64-mm-validate-hotplug-range-before-creating-line.patch
+kernel-watchdog-fix-watchdog_allowed_mask-not-used-w.patch
+mm-memcontrol-fix-missing-wakeup-polling-thread.patch
+afs-fix-afs_write_end-when-called-with-copied-0-ver-.patch
+perf-fix-get_recursion_context.patch
+nvme-factor-out-a-nvme_configure_metadata-helper.patch
+nvme-freeze-the-queue-over-lba_shift-updates.patch
+nvme-fix-incorrect-behavior-when-blkroset-is-called-.patch
+perf-simplify-group_sched_in.patch
+perf-fix-event-multiplexing-for-exclusive-groups.patch
--- /dev/null
+From b4e7cc0f6944081bfab85f7fc33b7a66a48deefc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Oct 2020 10:41:07 -0400
+Subject: SUNRPC: Fix general protection fault in trace_rpc_xdr_overflow()
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit d321ff589c16d8c2207485a6d7fbdb14e873d46e ]
+
+The TP_fast_assign() section is careful enough not to dereference
+xdr->rqst if it's NULL. The TP_STRUCT__entry section is not.
+
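+The fix is the usual NULL-guarded ternary; a trivial standalone version of
+the pattern (structure and field names are illustrative):
+
+```
+#include <stdio.h>
+
+struct rqst_like { const char *progname; };
+
+static const char *progname_or_unknown(const struct rqst_like *rqst)
+{
+	/* mirrors the guarded form the trace event now uses */
+	return rqst ? rqst->progname : "unknown";
+}
+
+int main(void)
+{
+	struct rqst_like r = { .progname = "nfs" };
+
+	printf("%s\n", progname_or_unknown(&r));	/* nfs */
+	printf("%s\n", progname_or_unknown(NULL));	/* unknown, no dereference */
+	return 0;
+}
+```
+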
+Fixes: 5582863f450c ("SUNRPC: Add XDR overflow trace event")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/trace/events/sunrpc.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 65d7dfbbc9cd7..ca2f27b9f919d 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -607,10 +607,10 @@ TRACE_EVENT(rpc_xdr_overflow,
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ __field(unsigned int, len)
+- __string(progname,
+- xdr->rqst->rq_task->tk_client->cl_program->name)
+- __string(procedure,
+- xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
++ __string(progname, xdr->rqst ?
++ xdr->rqst->rq_task->tk_client->cl_program->name : "unknown")
++ __string(procedure, xdr->rqst ?
++ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown")
+ ),
+
+ TP_fast_assign(
+--
+2.27.0
+
--- /dev/null
+From 210f4b06fc395585334ad35396cf24dad96ac2fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Nov 2020 11:52:30 +0000
+Subject: tools/bpftool: Fix attaching flow dissector
+
+From: Lorenz Bauer <lmb@cloudflare.com>
+
+[ Upstream commit f9b7ff0d7f7a466a920424246e7ddc2b84c87e52 ]
+
+My earlier patch to reject non-zero arguments to flow dissector attach
+broke attaching via bpftool. Instead of 0 it uses -1 for target_fd.
+Fix this by passing a zero argument when attaching the flow dissector.
+
+Fixes: 1b514239e859 ("bpf: flow_dissector: Check value of unused flags to BPF_PROG_ATTACH")
+Reported-by: Jiri Benc <jbenc@redhat.com>
+Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20201105115230.296657-1-lmb@cloudflare.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/bpf/bpftool/prog.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index d393eb8263a60..994506540e564 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -741,7 +741,7 @@ static int parse_attach_detach_args(int argc, char **argv, int *progfd,
+ }
+
+ if (*attach_type == BPF_FLOW_DISSECTOR) {
+- *mapfd = -1;
++ *mapfd = 0;
+ return 0;
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 1dda3b40a087f077277ce94d359b1df720d2bfba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Nov 2020 08:30:30 +0100
+Subject: vrf: Fix fast path output packet handling with async Netfilter rules
+
+From: Martin Willi <martin@strongswan.org>
+
+[ Upstream commit 9e2b7fa2df4365e99934901da4fb4af52d81e820 ]
+
+VRF devices use an optimized direct path on output if a default qdisc
+is involved, calling Netfilter hooks directly. This path, however, does
+not consider Netfilter rules completing asynchronously, such as with
+NFQUEUE. The Netfilter okfn() is called for asynchronously accepted
+packets, but the VRF never passes that packet down the stack to send
+it out over the slave device. Using the slower redirect path for this
+seems not feasible, as we do not know beforehand if a Netfilter hook
+has asynchronously completing rules.
+
+Fix the use of asynchronously completing Netfilter rules in OUTPUT and
+POSTROUTING by using a special completion function that additionally
+calls dst_output() to pass the packet down the stack. Also, slightly
+adjust the use of nf_reset_ct() so that it is called in the asynchronous
+case, too.
+
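+A hedged sketch of the control flow (function names are illustrative): the
+hook returns 1 when the verdict is decided synchronously, so the caller
+finishes the packet itself; for a deferred verdict the okfn runs later from
+the queueing machinery, and the fix makes that okfn both finish the packet
+and push it down the stack.
+
+```
+#include <stdio.h>
+
+typedef int (*okfn_t)(void);
+
+static void finish_direct(void) { puts("finish_direct: restore skb state"); }
+static int  local_out(void)     { puts("local_out: send packet down the stack"); return 1; }
+
+/* okfn used for deferred verdicts: do both steps, as the fix does. */
+static int output_direct_finish(void)
+{
+	finish_direct();
+	return local_out();
+}
+
+/* Returns 1 for a synchronous accept; 0 when the verdict is deferred
+ * (NFQUEUE-like). The real okfn runs later on reinject; it is called
+ * inline here only to keep the sketch runnable. */
+static int nf_hook_sim(int async, okfn_t okfn)
+{
+	if (!async)
+		return 1;
+	puts("verdict deferred; okfn runs on reinject:");
+	okfn();
+	return 0;
+}
+
+static void vrf_output(int async)
+{
+	int err = nf_hook_sim(async, output_direct_finish);
+
+	if (err == 1) {		/* synchronous accept: caller finishes the packet */
+		finish_direct();
+		local_out();
+	}
+}
+
+int main(void)
+{
+	puts("-- synchronous rules --");
+	vrf_output(0);
+	puts("-- asynchronous rules --");
+	vrf_output(1);
+	return 0;
+}
+```
+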
+Fixes: dcdd43c41e60 ("net: vrf: performance improvements for IPv4")
+Fixes: a9ec54d1b0cd ("net: vrf: performance improvements for IPv6")
+Signed-off-by: Martin Willi <martin@strongswan.org>
+Link: https://lore.kernel.org/r/20201106073030.3974927-1-martin@strongswan.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vrf.c | 92 +++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 69 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 60c1aadece89a..f2793ffde1913 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -608,8 +608,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+ return ret;
+ }
+
+-static int vrf_finish_direct(struct net *net, struct sock *sk,
+- struct sk_buff *skb)
++static void vrf_finish_direct(struct sk_buff *skb)
+ {
+ struct net_device *vrf_dev = skb->dev;
+
+@@ -628,7 +627,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk,
+ skb_pull(skb, ETH_HLEN);
+ }
+
+- return 1;
++ /* reset skb device */
++ nf_reset_ct(skb);
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -707,15 +707,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
+ return skb;
+ }
+
++static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
++ struct sk_buff *skb)
++{
++ vrf_finish_direct(skb);
++
++ return vrf_ip6_local_out(net, sk, skb);
++}
++
+ static int vrf_output6_direct(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+ {
++ int err = 1;
++
+ skb->protocol = htons(ETH_P_IPV6);
+
+- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+- net, sk, skb, NULL, skb->dev,
+- vrf_finish_direct,
+- !(IPCB(skb)->flags & IPSKB_REROUTED));
++ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
++ err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
++ NULL, skb->dev, vrf_output6_direct_finish);
++
++ if (likely(err == 1))
++ vrf_finish_direct(skb);
++
++ return err;
++}
++
++static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
++ struct sk_buff *skb)
++{
++ int err;
++
++ err = vrf_output6_direct(net, sk, skb);
++ if (likely(err == 1))
++ err = vrf_ip6_local_out(net, sk, skb);
++
++ return err;
+ }
+
+ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
+@@ -728,18 +754,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
+ skb->dev = vrf_dev;
+
+ err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
+- skb, NULL, vrf_dev, vrf_output6_direct);
++ skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
+
+ if (likely(err == 1))
+ err = vrf_output6_direct(net, sk, skb);
+
+- /* reset skb device */
+ if (likely(err == 1))
+- nf_reset_ct(skb);
+- else
+- skb = NULL;
++ return skb;
+
+- return skb;
++ return NULL;
+ }
+
+ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+@@ -919,15 +942,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
+ return skb;
+ }
+
++static int vrf_output_direct_finish(struct net *net, struct sock *sk,
++ struct sk_buff *skb)
++{
++ vrf_finish_direct(skb);
++
++ return vrf_ip_local_out(net, sk, skb);
++}
++
+ static int vrf_output_direct(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+ {
++ int err = 1;
++
+ skb->protocol = htons(ETH_P_IP);
+
+- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+- net, sk, skb, NULL, skb->dev,
+- vrf_finish_direct,
+- !(IPCB(skb)->flags & IPSKB_REROUTED));
++ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
++ err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
++ NULL, skb->dev, vrf_output_direct_finish);
++
++ if (likely(err == 1))
++ vrf_finish_direct(skb);
++
++ return err;
++}
++
++static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
++ struct sk_buff *skb)
++{
++ int err;
++
++ err = vrf_output_direct(net, sk, skb);
++ if (likely(err == 1))
++ err = vrf_ip_local_out(net, sk, skb);
++
++ return err;
+ }
+
+ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
+@@ -940,18 +989,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
+ skb->dev = vrf_dev;
+
+ err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
+- skb, NULL, vrf_dev, vrf_output_direct);
++ skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
+
+ if (likely(err == 1))
+ err = vrf_output_direct(net, sk, skb);
+
+- /* reset skb device */
+ if (likely(err == 1))
+- nf_reset_ct(skb);
+- else
+- skb = NULL;
++ return skb;
+
+- return skb;
++ return NULL;
+ }
+
+ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
+--
+2.27.0
+
--- /dev/null
+From f3ce8ba1b5429de0be560d2fcfcc6e21bab49c94 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Nov 2020 08:07:37 -0800
+Subject: xfs: fix a missing unlock on error in xfs_fs_map_blocks
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 2bd3fa793aaa7e98b74e3653fdcc72fa753913b5 ]
+
+We also need to drop the iolock when invalidate_inode_pages2 fails, not
+only in all the other error and success cases.
+
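+The shape of the bug is the classic "early return skips the unlock"
+pattern; a minimal standalone sketch of why the failing invalidate must go
+through the unlock label (names are illustrative):
+
+```
+#include <stdio.h>
+
+static int lock_depth;
+
+static void ilock(void)   { lock_depth++; }
+static void iunlock(void) { lock_depth--; }
+
+static int map_blocks(int invalidate_fails)
+{
+	int error = 0;
+
+	ilock();
+	if (invalidate_fails) {
+		error = -5;		/* -EIO-style failure */
+		goto out_unlock;	/* a bare "return error" here would leak the lock */
+	}
+	/* ... perform the mapping work ... */
+out_unlock:
+	iunlock();
+	return error;
+}
+
+int main(void)
+{
+	map_blocks(1);
+	printf("lock depth after failing call: %d (0 means balanced)\n", lock_depth);
+	return 0;
+}
+```
+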
+Fixes: 527851124d10 ("xfs: implement pNFS export operations")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_pnfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
+index b101feb2aab45..f3082a957d5e1 100644
+--- a/fs/xfs/xfs_pnfs.c
++++ b/fs/xfs/xfs_pnfs.c
+@@ -134,7 +134,7 @@ xfs_fs_map_blocks(
+ goto out_unlock;
+ error = invalidate_inode_pages2(inode->i_mapping);
+ if (WARN_ON_ONCE(error))
+- return error;
++ goto out_unlock;
+
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+--
+2.27.0
+
--- /dev/null
+From 3e25c3c5839f37a685ee728bc36e4c7c4636e6d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 Nov 2020 16:32:42 -0800
+Subject: xfs: fix brainos in the refcount scrubber's rmap fragment processor
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit 54e9b09e153842ab5adb8a460b891e11b39e9c3d ]
+
+Fix some serious WTF in the reference count scrubber's rmap fragment
+processing. The code comment says that this loop is supposed to move
+all fragment records starting at or before bno onto the worklist, but
+there's no obvious reason why nr (the number of items added) should
+increment starting from 1, and breaking the loop when we've added the
+target number seems dubious since we could have more rmap fragments that
+should have been added to the worklist.
+
+This seems to manifest in xfs/411 when adding one to the refcount field.
+
+Fixes: dbde19da9637 ("xfs: cross-reference the rmapbt data with the refcountbt")
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/scrub/refcount.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
+index beaeb6fa31197..dd672e6bbc75c 100644
+--- a/fs/xfs/scrub/refcount.c
++++ b/fs/xfs/scrub/refcount.c
+@@ -170,7 +170,6 @@ xchk_refcountbt_process_rmap_fragments(
+ */
+ INIT_LIST_HEAD(&worklist);
+ rbno = NULLAGBLOCK;
+- nr = 1;
+
+ /* Make sure the fragments actually /are/ in agbno order. */
+ bno = 0;
+@@ -184,15 +183,14 @@ xchk_refcountbt_process_rmap_fragments(
+ * Find all the rmaps that start at or before the refc extent,
+ * and put them on the worklist.
+ */
++ nr = 0;
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+- if (frag->rm.rm_startblock > refchk->bno)
+- goto done;
++ if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
++ break;
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (bno < rbno)
+ rbno = bno;
+ list_move_tail(&frag->list, &worklist);
+- if (nr == target_nr)
+- break;
+ nr++;
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 26b9e0f7bcb7ace34805f189273ade94c31ce110 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 Nov 2020 16:32:43 -0800
+Subject: xfs: fix flags argument to rmap lookup when converting shared file
+ rmaps
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit ea8439899c0b15a176664df62aff928010fad276 ]
+
+Pass the same oldext argument (which contains the existing rmapping's
+unwritten state) to xfs_rmap_lookup_le_range at the start of
+xfs_rmap_convert_shared. At this point in the code, flags is zero,
+which means that we perform lookups using the wrong key.
+
+Fixes: 3f165b334e51 ("xfs: convert unwritten status of reverse mappings for shared files")
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/libxfs/xfs_rmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
+index 27c39268c31f7..82117b1ee34cb 100644
+--- a/fs/xfs/libxfs/xfs_rmap.c
++++ b/fs/xfs/libxfs/xfs_rmap.c
+@@ -1514,7 +1514,7 @@ xfs_rmap_convert_shared(
+ * record for our insertion point. This will also give us the record for
+ * start block contiguity tests.
+ */
+- error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
++ error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
+ &PREV, &i);
+ if (error)
+ goto done;
+--
+2.27.0
+
--- /dev/null
+From aeb90c68563d907fc704d383f0cadeb8a49bd006 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 Nov 2020 16:32:44 -0800
+Subject: xfs: fix rmap key and record comparison functions
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit 6ff646b2ceb0eec916101877f38da0b73e3a5b7f ]
+
+Keys for extent interval records in the reverse mapping btree are
+supposed to be computed as follows:
+
+(physical block, owner, fork, is_btree, is_unwritten, offset)
+
+This provides users the ability to look up a reverse mapping from a bmbt
+record -- start with the physical block; then if there are multiple
+records for the same block, move on to the owner; then the inode fork
+type; and so on to the file offset.
+
+However, the key comparison functions incorrectly remove the
+fork/btree/unwritten information that's encoded in the on-disk offset.
+This means that lookup comparisons are only done with:
+
+(physical block, owner, offset)
+
+This means that queries can return incorrect results. On consistent
+filesystems this hasn't been an issue because blocks are never shared
+between forks or with bmbt blocks; and are never unwritten. However,
+this bug means that online repair cannot always detect corruption in the
+key information in internal rmapbt nodes.
+
+Found by fuzzing keys[1].attrfork = ones on xfs/371.
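+A standalone illustration of why the masking matters: two keys that differ
+only in a flag encoded in the high bits of the packed offset compare equal
+under the masked comparison but sort correctly under the full comparison.
+The bit position below is illustrative; the real layout lives in
+fs/xfs/libxfs/xfs_rmap.h.
+
+```
+#include <stdio.h>
+#include <stdint.h>
+
+#define ATTR_FORK_FLAG	(1ULL << 63)		/* illustrative flag bit */
+#define OFF_MASK	(~ATTR_FORK_FLAG)
+
+static int cmp_masked(uint64_t a, uint64_t b)	/* old, lossy comparison */
+{
+	a &= OFF_MASK;
+	b &= OFF_MASK;
+	return (a > b) - (a < b);
+}
+
+static int cmp_full(uint64_t a, uint64_t b)	/* fixed comparison */
+{
+	return (a > b) - (a < b);
+}
+
+int main(void)
+{
+	uint64_t data_fork_key = 100;			/* offset 100, data fork */
+	uint64_t attr_fork_key = 100 | ATTR_FORK_FLAG;	/* offset 100, attr fork */
+
+	printf("masked cmp = %d (keys wrongly look identical)\n",
+	       cmp_masked(data_fork_key, attr_fork_key));
+	printf("full cmp   = %d (the flag participates in the ordering)\n",
+	       cmp_full(data_fork_key, attr_fork_key));
+	return 0;
+}
+```
+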
+
+Fixes: 4b8ed67794fe ("xfs: add rmap btree operations")
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/libxfs/xfs_rmap_btree.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
+index beb81c84a9375..577a66381327c 100644
+--- a/fs/xfs/libxfs/xfs_rmap_btree.c
++++ b/fs/xfs/libxfs/xfs_rmap_btree.c
+@@ -243,8 +243,8 @@ xfs_rmapbt_key_diff(
+ else if (y > x)
+ return -1;
+
+- x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
+- y = rec->rm_offset;
++ x = be64_to_cpu(kp->rm_offset);
++ y = xfs_rmap_irec_offset_pack(rec);
+ if (x > y)
+ return 1;
+ else if (y > x)
+@@ -275,8 +275,8 @@ xfs_rmapbt_diff_two_keys(
+ else if (y > x)
+ return -1;
+
+- x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
+- y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
++ x = be64_to_cpu(kp1->rm_offset);
++ y = be64_to_cpu(kp2->rm_offset);
+ if (x > y)
+ return 1;
+ else if (y > x)
+@@ -390,8 +390,8 @@ xfs_rmapbt_keys_inorder(
+ return 1;
+ else if (a > b)
+ return 0;
+- a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
+- b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
++ a = be64_to_cpu(k1->rmap.rm_offset);
++ b = be64_to_cpu(k2->rmap.rm_offset);
+ if (a <= b)
+ return 1;
+ return 0;
+@@ -420,8 +420,8 @@ xfs_rmapbt_recs_inorder(
+ return 1;
+ else if (a > b)
+ return 0;
+- a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
+- b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
++ a = be64_to_cpu(r1->rmap.rm_offset);
++ b = be64_to_cpu(r2->rmap.rm_offset);
+ if (a <= b)
+ return 1;
+ return 0;
+--
+2.27.0
+
--- /dev/null
+From 417734315af8e3b453a29377557f03049473a76a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 Nov 2020 16:32:43 -0800
+Subject: xfs: set the unwritten bit in rmap lookup flags in
+ xchk_bmap_get_rmapextents
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit 5dda3897fd90783358c4c6115ef86047d8c8f503 ]
+
+When the bmbt scrubber is looking up rmap extents, we need to set the
+extent flags from the bmbt record fully. This will matter once we fix
+the rmap btree comparison functions to check those flags correctly.
+
+Fixes: d852657ccfc0 ("xfs: cross-reference reverse-mapping btree")
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/scrub/bmap.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
+index 955302e7cdde9..412e2ec55e388 100644
+--- a/fs/xfs/scrub/bmap.c
++++ b/fs/xfs/scrub/bmap.c
+@@ -113,6 +113,8 @@ xchk_bmap_get_rmap(
+
+ if (info->whichfork == XFS_ATTR_FORK)
+ rflags |= XFS_RMAP_ATTR_FORK;
++ if (irec->br_state == XFS_EXT_UNWRITTEN)
++ rflags |= XFS_RMAP_UNWRITTEN;
+
+ /*
+ * CoW staging extents are owned (on disk) by the refcountbt, so
+--
+2.27.0
+