--- /dev/null
+From bcff523424c3ae5ae2afcedcaacbb3b745f4bf40 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Sep 2021 15:06:14 +0800
+Subject: bpf, s390: Fix potential memory leak about jit_data
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit 686cb8b9f6b46787f035afe8fbd132a74e6b1bdd ]
+
+Make sure to free jit_data through kfree() in the error path.
+
+Fixes: 1c8f9b91c456 ("bpf: s390: add JIT support for multi-function programs")
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/net/bpf_jit_comp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 840d8594437d..1a374d021e25 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -1826,7 +1826,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
+ if (jit.addrs == NULL) {
+ fp = orig_fp;
+- goto out;
++ goto free_addrs;
+ }
+ /*
+ * Three initial passes:
+--
+2.33.0
+
--- /dev/null
+From 7b90e32ec01200396a00a119c6fb5437c31367c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Sep 2021 17:57:18 +0100
+Subject: i2c: acpi: fix resource leak in reconfiguration device addition
+
+From: Jamie Iles <quic_jiles@quicinc.com>
+
+[ Upstream commit 6558b646ce1c2a872fe1c2c7cb116f05a2c1950f ]
+
+acpi_i2c_find_adapter_by_handle() calls bus_find_device() which takes a
+reference on the adapter which is never released which will result in a
+reference count leak and render the adapter unremovable. Make sure to
+put the adapter after creating the client in the same manner that we do
+for OF.
+
+Fixes: 525e6fabeae2 ("i2c / ACPI: add support for ACPI reconfigure notifications")
+Signed-off-by: Jamie Iles <quic_jiles@quicinc.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+[wsa: fixed title]
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/i2c-core-acpi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index 6f0aa0ed3241..74925621f239 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -422,6 +422,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
+ break;
+
+ i2c_acpi_register_device(adapter, adev, &info);
++ put_device(&adapter->dev);
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+--
+2.33.0
+
--- /dev/null
+From 0e0eeea37cda84f8fde648b548e0ceb1cf9d471b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Sep 2021 18:14:14 +0800
+Subject: i2c: mediatek: Add OFFSET_EXT_CONF setting back
+
+From: Kewei Xu <kewei.xu@mediatek.com>
+
+[ Upstream commit 3bce7703c7ba648bd9e174dc1413f422b7998833 ]
+
+In the commit be5ce0e97cc7 ("i2c: mediatek: Add i2c ac-timing adjust
+support"), we miss setting OFFSET_EXT_CONF register if
+i2c->dev_comp->timing_adjust is false, now add it back.
+
+Fixes: be5ce0e97cc7 ("i2c: mediatek: Add i2c ac-timing adjust support")
+Signed-off-by: Kewei Xu <kewei.xu@mediatek.com>
+Reviewed-by: Qii Wang <qii.wang@mediatek.com>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-mt65xx.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index 477480d1de6b..7d4b3eb7077a 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -41,6 +41,8 @@
+ #define I2C_HANDSHAKE_RST 0x0020
+ #define I2C_FIFO_ADDR_CLR 0x0001
+ #define I2C_DELAY_LEN 0x0002
++#define I2C_ST_START_CON 0x8001
++#define I2C_FS_START_CON 0x1800
+ #define I2C_TIME_CLR_VALUE 0x0000
+ #define I2C_TIME_DEFAULT_VALUE 0x0003
+ #define I2C_WRRD_TRANAC_VALUE 0x0002
+@@ -480,6 +482,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+ {
+ u16 control_reg;
+ u16 intr_stat_reg;
++ u16 ext_conf_val;
+
+ mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
+ intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
+@@ -518,8 +521,13 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+ if (i2c->dev_comp->ltiming_adjust)
+ mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
+
++ if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
++ ext_conf_val = I2C_ST_START_CON;
++ else
++ ext_conf_val = I2C_FS_START_CON;
++
+ if (i2c->dev_comp->timing_adjust) {
+- mtk_i2c_writew(i2c, i2c->ac_timing.ext, OFFSET_EXT_CONF);
++ ext_conf_val = i2c->ac_timing.ext;
+ mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div,
+ OFFSET_CLOCK_DIV);
+ mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE,
+@@ -544,6 +552,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+ OFFSET_HS_STA_STO_AC_TIMING);
+ }
+ }
++ mtk_i2c_writew(i2c, ext_conf_val, OFFSET_EXT_CONF);
+
+ /* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
+ if (i2c->have_pmic)
+--
+2.33.0
+
--- /dev/null
+From 816daeabaf5d5fd52cf9a96bcaf23dbed96d04b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Aug 2021 17:45:01 +0300
+Subject: i2c: mlxcpld: Fix criteria for frequency setting
+
+From: Vadim Pasternak <vadimp@nvidia.com>
+
+[ Upstream commit 52f57396c75acd77ebcdf3d20aed24ed248e9f79 ]
+
+Value for getting frequency capability wrongly has been taken from
+register offset instead of register value.
+
+Fixes: 66b0c2846ba8 ("i2c: mlxcpld: Add support for I2C bus frequency setting")
+Signed-off-by: Vadim Pasternak <vadimp@nvidia.com>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-mlxcpld.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
+index 4e0b7c2882ce..6d41c3db8a2b 100644
+--- a/drivers/i2c/busses/i2c-mlxcpld.c
++++ b/drivers/i2c/busses/i2c-mlxcpld.c
+@@ -495,7 +495,7 @@ mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv,
+ return err;
+
+ /* Set frequency only if it is not 100KHz, which is default. */
+- switch ((data->reg & data->mask) >> data->bit) {
++ switch ((regval & data->mask) >> data->bit) {
+ case MLXCPLD_I2C_FREQ_1000KHZ:
+ freq = MLXCPLD_I2C_FREQ_1000KHZ_SET;
+ break;
+--
+2.33.0
+
--- /dev/null
+From 86273c267ee8779b8ebf5a28174a2e2839c82d4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Aug 2021 17:45:02 +0300
+Subject: i2c: mlxcpld: Modify register setting for 400KHz frequency
+
+From: Vadim Pasternak <vadimp@nvidia.com>
+
+[ Upstream commit fa1049135c15b4930ce7ea757a81b1b78908f304 ]
+
+Change setting for 400KHz frequency support by more accurate value.
+
+Fixes: 66b0c2846ba8 ("i2c: mlxcpld: Add support for I2C bus frequency setting")
+Signed-off-by: Vadim Pasternak <vadimp@nvidia.com>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-mlxcpld.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
+index 6d41c3db8a2b..015e11c4663f 100644
+--- a/drivers/i2c/busses/i2c-mlxcpld.c
++++ b/drivers/i2c/busses/i2c-mlxcpld.c
+@@ -49,7 +49,7 @@
+ #define MLXCPLD_LPCI2C_NACK_IND 2
+
+ #define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04
+-#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0f
++#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0c
+ #define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
+
+ enum mlxcpld_i2c_frequency {
+--
+2.33.0
+
--- /dev/null
+From 9644e387c94b261560f2b26be8dea74f42786b6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 22 Aug 2021 18:50:36 -0400
+Subject: objtool: Make .altinstructions section entry size consistent
+
+From: Joe Lawrence <joe.lawrence@redhat.com>
+
+[ Upstream commit dc02368164bd0ec603e3f5b3dd8252744a667b8a ]
+
+Commit e31694e0a7a7 ("objtool: Don't make .altinstructions writable")
+aligned objtool-created and kernel-created .altinstructions section
+flags, but there remains a minor discrepency in their use of a section
+entry size: objtool sets one while the kernel build does not.
+
+While sh_entsize of sizeof(struct alt_instr) seems intuitive, this small
+deviation can cause failures with external tooling (kpatch-build).
+
+Fix this by creating new .altinstructions sections with sh_entsize of 0
+and then later updating sec->sh_size as alternatives are added to the
+section. An added benefit is avoiding the data descriptor and buffer
+created by elf_create_section(), but previously unused by
+elf_add_alternative().
+
+Fixes: 9bc0bb50727c ("objtool/x86: Rewrite retpoline thunk calls")
+Signed-off-by: Joe Lawrence <joe.lawrence@redhat.com>
+Reviewed-by: Miroslav Benes <mbenes@suse.cz>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lore.kernel.org/r/20210822225037.54620-2-joe.lawrence@redhat.com
+Cc: Andy Lavr <andy.lavr@gmail.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: x86@kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/objtool/arch/x86/decode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
+index bc821056aba9..0893436cc09f 100644
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
+ sec = find_section_by_name(elf, ".altinstructions");
+ if (!sec) {
+ sec = elf_create_section(elf, ".altinstructions",
+- SHF_ALLOC, size, 0);
++ SHF_ALLOC, 0, 0);
+
+ if (!sec) {
+ WARN_ELF("elf_create_section");
+--
+2.33.0
+
--- /dev/null
+From 10a369b4fa108f3d2993259e62a0f4d00f7a9256 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Oct 2021 10:07:50 -0700
+Subject: objtool: Remove reloc symbol type checks in get_alt_entry()
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+[ Upstream commit 4d8b35968bbf9e42b6b202eedb510e2c82ad8b38 ]
+
+Converting a special section's relocation reference to a symbol is
+straightforward. No need for objtool to complain that it doesn't know
+how to handle it. Just handle it.
+
+This fixes the following warning:
+
+ arch/x86/kvm/emulate.o: warning: objtool: __ex_table+0x4: don't know how to handle reloc symbol type: kvm_fastop_exception
+
+Fixes: 24ff65257375 ("objtool: Teach get_alt_entry() about more relocation types")
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lore.kernel.org/r/feadbc3dfb3440d973580fad8d3db873cbfe1694.1633367242.git.jpoimboe@redhat.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: x86@kernel.org
+Cc: Miroslav Benes <mbenes@suse.cz>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/objtool/special.c | 36 +++++++-----------------------------
+ 1 file changed, 7 insertions(+), 29 deletions(-)
+
+diff --git a/tools/objtool/special.c b/tools/objtool/special.c
+index f1428e32a505..83d5f969bcb0 100644
+--- a/tools/objtool/special.c
++++ b/tools/objtool/special.c
+@@ -58,22 +58,11 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
+ {
+ }
+
+-static bool reloc2sec_off(struct reloc *reloc, struct section **sec, unsigned long *off)
++static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
++ unsigned long *off)
+ {
+- switch (reloc->sym->type) {
+- case STT_FUNC:
+- *sec = reloc->sym->sec;
+- *off = reloc->sym->offset + reloc->addend;
+- return true;
+-
+- case STT_SECTION:
+- *sec = reloc->sym->sec;
+- *off = reloc->addend;
+- return true;
+-
+- default:
+- return false;
+- }
++ *sec = reloc->sym->sec;
++ *off = reloc->sym->offset + reloc->addend;
+ }
+
+ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
+@@ -109,13 +98,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
+ WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
+ return -1;
+ }
+- if (!reloc2sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off)) {
+- WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
+- sec, offset + entry->orig,
+- orig_reloc->sym->type,
+- orig_reloc->sym->name);
+- return -1;
+- }
++
++ reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
+
+ if (!entry->group || alt->new_len) {
+ new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
+@@ -133,13 +117,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
+ if (arch_is_retpoline(new_reloc->sym))
+ return 1;
+
+- if (!reloc2sec_off(new_reloc, &alt->new_sec, &alt->new_off)) {
+- WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
+- sec, offset + entry->new,
+- new_reloc->sym->type,
+- new_reloc->sym->name);
+- return -1;
+- }
++ reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);
+
+ /* _ASM_EXTABLE_EX hack */
+ if (alt->new_off >= 0x7ffffff0)
+--
+2.33.0
+
--- /dev/null
+From 673807cf4980bfaefbf329d69064d884e5344bfd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Sep 2021 16:12:24 +0200
+Subject: powerpc/32s: Fix kuap_kernel_restore()
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+[ Upstream commit d93f9e23744b7bf11a98b2ddb091d129482ae179 ]
+
+At interrupt exit, kuap_kernel_restore() calls kuap_unlock() with the
+value contained in regs->kuap. However, when regs->kuap contains
+0xffffffff it means that KUAP was not unlocked so calling kuap_unlock()
+is irrelevant and results in jeopardising the contents of kernel space
+segment registers.
+
+So check that regs->kuap doesn't contain KUAP_NONE before calling
+kuap_unlock(). In the meantime it also means that if KUAP has not
+been correctly locked back at interrupt exit, it must be locked
+before continuing. This is done by checking the content of
+current->thread.kuap which was returned by kuap_get_and_assert_locked()
+
+Fixes: 16132529cee5 ("powerpc/32s: Rework Kernel Userspace Access Protection")
+Reported-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/0d0c4d0f050a637052287c09ba521bad960a2790.1631715131.git.christophe.leroy@csgroup.eu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/book3s/32/kup.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
+index d4b145b279f6..9f38040f0641 100644
+--- a/arch/powerpc/include/asm/book3s/32/kup.h
++++ b/arch/powerpc/include/asm/book3s/32/kup.h
+@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+ if (kuap_is_disabled())
+ return;
+
++ if (unlikely(kuap != KUAP_NONE)) {
++ current->thread.kuap = KUAP_NONE;
++ kuap_lock(kuap, false);
++ }
++
++ if (likely(regs->kuap == KUAP_NONE))
++ return;
++
+ current->thread.kuap = regs->kuap;
+
+ kuap_unlock(regs->kuap, false);
+--
+2.33.0
+
--- /dev/null
+From 8c123d2df12aba76ee58f2a25960e45d3cb997e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Oct 2021 00:56:38 +1000
+Subject: powerpc/64s: fix program check interrupt emergency stack path
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+[ Upstream commit 3e607dc4df180b72a38e75030cb0f94d12808712 ]
+
+Emergency stack path was jumping into a 3: label inside the
+__GEN_COMMON_BODY macro for the normal path after it had finished,
+rather than jumping over it. By a small miracle this is the correct
+place to build up a new interrupt frame with the existing stack
+pointer, so things basically worked okay with an added weird looking
+700 trap frame on top (which had the wrong ->nip so it didn't decode
+bug messages either).
+
+Fix this by avoiding using numeric labels when jumping over non-trivial
+macros.
+
+Before:
+
+ LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA PowerNV
+ Modules linked in:
+ CPU: 0 PID: 88 Comm: sh Not tainted 5.15.0-rc2-00034-ge057cdade6e5 #2637
+ NIP: 7265677368657265 LR: c00000000006c0c8 CTR: c0000000000097f0
+ REGS: c0000000fffb3a50 TRAP: 0700 Not tainted
+ MSR: 9000000000021031 <SF,HV,ME,IR,DR,LE> CR: 00000700 XER: 20040000
+ CFAR: c0000000000098b0 IRQMASK: 0
+ GPR00: c00000000006c964 c0000000fffb3cf0 c000000001513800 0000000000000000
+ GPR04: 0000000048ab0778 0000000042000000 0000000000000000 0000000000001299
+ GPR08: 000001e447c718ec 0000000022424282 0000000000002710 c00000000006bee8
+ GPR12: 9000000000009033 c0000000016b0000 00000000000000b0 0000000000000001
+ GPR16: 0000000000000000 0000000000000002 0000000000000000 0000000000000ff8
+ GPR20: 0000000000001fff 0000000000000007 0000000000000080 00007fff89d90158
+ GPR24: 0000000002000000 0000000002000000 0000000000000255 0000000000000300
+ GPR28: c000000001270000 0000000042000000 0000000048ab0778 c000000080647e80
+ NIP [7265677368657265] 0x7265677368657265
+ LR [c00000000006c0c8] ___do_page_fault+0x3f8/0xb10
+ Call Trace:
+ [c0000000fffb3cf0] [c00000000000bdac] soft_nmi_common+0x13c/0x1d0 (unreliable)
+ --- interrupt: 700 at decrementer_common_virt+0xb8/0x230
+ NIP: c0000000000098b8 LR: c00000000006c0c8 CTR: c0000000000097f0
+ REGS: c0000000fffb3d60 TRAP: 0700 Not tainted
+ MSR: 9000000000021031 <SF,HV,ME,IR,DR,LE> CR: 22424282 XER: 20040000
+ CFAR: c0000000000098b0 IRQMASK: 0
+ GPR00: c00000000006c964 0000000000002400 c000000001513800 0000000000000000
+ GPR04: 0000000048ab0778 0000000042000000 0000000000000000 0000000000001299
+ GPR08: 000001e447c718ec 0000000022424282 0000000000002710 c00000000006bee8
+ GPR12: 9000000000009033 c0000000016b0000 00000000000000b0 0000000000000001
+ GPR16: 0000000000000000 0000000000000002 0000000000000000 0000000000000ff8
+ GPR20: 0000000000001fff 0000000000000007 0000000000000080 00007fff89d90158
+ GPR24: 0000000002000000 0000000002000000 0000000000000255 0000000000000300
+ GPR28: c000000001270000 0000000042000000 0000000048ab0778 c000000080647e80
+ NIP [c0000000000098b8] decrementer_common_virt+0xb8/0x230
+ LR [c00000000006c0c8] ___do_page_fault+0x3f8/0xb10
+ --- interrupt: 700
+ Instruction dump:
+ XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
+ XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
+ ---[ end trace 6d28218e0cc3c949 ]---
+
+After:
+
+ ------------[ cut here ]------------
+ kernel BUG at arch/powerpc/kernel/exceptions-64s.S:491!
+ Oops: Exception in kernel mode, sig: 5 [#1]
+ LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA PowerNV
+ Modules linked in:
+ CPU: 0 PID: 88 Comm: login Not tainted 5.15.0-rc2-00034-ge057cdade6e5-dirty #2638
+ NIP: c0000000000098b8 LR: c00000000006bf04 CTR: c0000000000097f0
+ REGS: c0000000fffb3d60 TRAP: 0700 Not tainted
+ MSR: 9000000000021031 <SF,HV,ME,IR,DR,LE> CR: 24482227 XER: 00040000
+ CFAR: c0000000000098b0 IRQMASK: 0
+ GPR00: c00000000006bf04 0000000000002400 c000000001513800 c000000001271868
+ GPR04: 00000000100f0d29 0000000042000000 0000000000000007 0000000000000009
+ GPR08: 00000000100f0d29 0000000024482227 0000000000002710 c000000000181b3c
+ GPR12: 9000000000009033 c0000000016b0000 00000000100f0d29 c000000005b22f00
+ GPR16: 00000000ffff0000 0000000000000001 0000000000000009 00000000100eed90
+ GPR20: 00000000100eed90 0000000010000000 000000001000a49c 00000000100f1430
+ GPR24: c000000001271868 0000000002000000 0000000000000215 0000000000000300
+ GPR28: c000000001271800 0000000042000000 00000000100f0d29 c000000080647860
+ NIP [c0000000000098b8] decrementer_common_virt+0xb8/0x230
+ LR [c00000000006bf04] ___do_page_fault+0x234/0xb10
+ Call Trace:
+ Instruction dump:
+ 4182000c 39400001 48000008 894d0932 714a0001 39400008 408225fc 718a4000
+ 7c2a0b78 3821fcf0 41c20008 e82d0910 <0981fcf0> f92101a0 f9610170 f9810178
+ ---[ end trace a5dbd1f5ea4ccc51 ]---
+
+Fixes: 0a882e28468f4 ("powerpc/64s/exception: remove bad stack branch")
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20211004145642.1331214-2-npiggin@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/exceptions-64s.S | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 37859e62a8dc..024d9231f88c 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1665,27 +1665,30 @@ EXC_COMMON_BEGIN(program_check_common)
+ */
+
+ andi. r10,r12,MSR_PR
+- bne 2f /* If userspace, go normal path */
++ bne .Lnormal_stack /* If userspace, go normal path */
+
+ andis. r10,r12,(SRR1_PROGTM)@h
+- bne 1f /* If TM, emergency */
++ bne .Lemergency_stack /* If TM, emergency */
+
+ cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
+- blt 2f /* normal path if not */
++ blt .Lnormal_stack /* normal path if not */
+
+ /* Use the emergency stack */
+-1: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
++.Lemergency_stack:
++ andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
+ /* 3 in EXCEPTION_PROLOG_COMMON */
+ mr r10,r1 /* Save r1 */
+ ld r1,PACAEMERGSP(r13) /* Use emergency stack */
+ subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
+ __ISTACK(program_check)=0
+ __GEN_COMMON_BODY program_check
+- b 3f
+-2:
++ b .Ldo_program_check
++
++.Lnormal_stack:
+ __ISTACK(program_check)=1
+ __GEN_COMMON_BODY program_check
+-3:
++
++.Ldo_program_check:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl program_check_exception
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+--
+2.33.0
+
--- /dev/null
+From 90635952e820a26ddf53d6315562c6d00597136f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Oct 2021 00:56:42 +1000
+Subject: powerpc/64s: Fix unrecoverable MCE calling async handler from NMI
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+[ Upstream commit f08fb25bc66986b0952724530a640d9970fa52c1 ]
+
+The machine check handler is not considered NMI on 64s. The early
+handler is the true NMI handler, and then it schedules the
+machine_check_exception handler to run when interrupts are enabled.
+
+This works fine except the case of an unrecoverable MCE, where the true
+NMI is taken when MSR[RI] is clear, it can not recover, so it calls
+machine_check_exception directly so something might be done about it.
+
+Calling an async handler from NMI context can result in irq state and
+other things getting corrupted. This can also trigger the BUG at
+ arch/powerpc/include/asm/interrupt.h:168
+ BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
+
+Fix this by making an _async version of the handler which is called
+in the normal case, and a NMI version that is called for unrecoverable
+interrupts.
+
+Fixes: 2b43dd7653cc ("powerpc/64: enable MSR[EE] in irq replay pt_regs")
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Tested-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20211004145642.1331214-6-npiggin@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/interrupt.h | 5 ++---
+ arch/powerpc/kernel/exceptions-64s.S | 8 +++++--
+ arch/powerpc/kernel/traps.c | 31 ++++++++++++++++------------
+ 3 files changed, 26 insertions(+), 18 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
+index 6b800d3e2681..a925dbc5833c 100644
+--- a/arch/powerpc/include/asm/interrupt.h
++++ b/arch/powerpc/include/asm/interrupt.h
+@@ -525,10 +525,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
+ /* kernel/traps.c */
+ DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
+ #ifdef CONFIG_PPC_BOOK3S_64
+-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
+-#else
+-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
++DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
+ #endif
++DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+ DECLARE_INTERRUPT_HANDLER(SMIException);
+ DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
+ DECLARE_INTERRUPT_HANDLER(unknown_exception);
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 024d9231f88c..eaf1f72131a1 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
+ li r10,MSR_RI
+ mtmsrd r10,1
+ addi r3,r1,STACK_FRAME_OVERHEAD
+- bl machine_check_exception
++ bl machine_check_exception_async
+ b interrupt_return_srr
+
+
+@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+ subi r12,r12,1
+ sth r12,PACA_IN_MCE(r13)
+
+- /* Invoke machine_check_exception to print MCE event and panic. */
++ /*
++ * Invoke machine_check_exception to print MCE event and panic.
++ * This is the NMI version of the handler because we are called from
++ * the early handler which is a true NMI.
++ */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl machine_check_exception
+
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 4ac85ab15ad7..08356ec9bfed 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -797,24 +797,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
+ * do_exit() checks for in_interrupt() and panics in that case, so
+ * exit the irq/nmi before calling die.
+ */
+- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+- irq_exit();
+- else
++ if (in_nmi())
+ nmi_exit();
++ else
++ irq_exit();
+ die(str, regs, err);
+ }
+
+ /*
+- * BOOK3S_64 does not call this handler as a non-maskable interrupt
++ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
+ * (it uses its own early real-mode handler to handle the MCE proper
+ * and then raises irq_work to call this handler when interrupts are
+- * enabled).
++ * enabled). The only time when this is not true is if the early handler
++ * is unrecoverable, then it does call this directly to try to get a
++ * message out.
+ */
+-#ifdef CONFIG_PPC_BOOK3S_64
+-DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
+-#else
+-DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+-#endif
++static void __machine_check_exception(struct pt_regs *regs)
+ {
+ int recover = 0;
+
+@@ -848,12 +846,19 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+ /* Must die if the interrupt is not recoverable */
+ if (!(regs->msr & MSR_RI))
+ die_mce("Unrecoverable Machine check", regs, SIGBUS);
++}
+
+ #ifdef CONFIG_PPC_BOOK3S_64
+- return;
+-#else
+- return 0;
++DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
++{
++ __machine_check_exception(regs);
++}
+ #endif
++DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
++{
++ __machine_check_exception(regs);
++
++ return 0;
+ }
+
+ DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
+--
+2.33.0
+
--- /dev/null
+From 721b118867eff1b051d7aebd30e29aa159d8ed48 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Oct 2021 01:55:22 +0530
+Subject: powerpc/bpf: Fix BPF_MOD when imm == 1
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+[ Upstream commit 8bbc9d822421d9ac8ff9ed26a3713c9afc69d6c8 ]
+
+Only ignore the operation if dividing by 1.
+
+Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Tested-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Acked-by: Song Liu <songliubraving@fb.com>
+Acked-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/c674ca18c3046885602caebb326213731c675d06.1633464148.git.naveen.n.rao@linux.vnet.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/net/bpf_jit_comp64.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index b87a63dba9c8..d16b97179646 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -389,8 +389,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
+ if (imm == 0)
+ return -EINVAL;
+- else if (imm == 1)
+- goto bpf_alu32_trunc;
++ if (imm == 1) {
++ if (BPF_OP(code) == BPF_DIV) {
++ goto bpf_alu32_trunc;
++ } else {
++ EMIT(PPC_RAW_LI(dst_reg, 0));
++ break;
++ }
++ }
+
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ switch (BPF_CLASS(code)) {
+--
+2.33.0
+
--- /dev/null
+From 7aa347e2ec9b8546c933581f98c3dd4cd67fc545 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Oct 2021 01:55:23 +0530
+Subject: powerpc/bpf: Fix BPF_SUB when imm == 0x80000000
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+[ Upstream commit 5855c4c1f415ca3ba1046e77c0b3d3dfc96c9025 ]
+
+We aren't handling subtraction involving an immediate value of
+0x80000000 properly. Fix the same.
+
+Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+[mpe: Fold in fix from Naveen to use imm <= 32768]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/fc4b1276eb10761fd7ce0814c8dd089da2815251.1633464148.git.naveen.n.rao@linux.vnet.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/net/bpf_jit_comp64.c | 27 +++++++++++++++++----------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index d16b97179646..dff4a2930970 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -328,18 +328,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
+- case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
++ if (!imm) {
++ goto bpf_alu32_trunc;
++ } else if (imm >= -32768 && imm < 32768) {
++ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
++ } else {
++ PPC_LI32(b2p[TMP_REG_1], imm);
++ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
++ }
++ goto bpf_alu32_trunc;
++ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
+- if (BPF_OP(code) == BPF_SUB)
+- imm = -imm;
+- if (imm) {
+- if (imm >= -32768 && imm < 32768)
+- EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+- else {
+- PPC_LI32(b2p[TMP_REG_1], imm);
+- EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
+- }
++ if (!imm) {
++ goto bpf_alu32_trunc;
++ } else if (imm > -32768 && imm <= 32768) {
++ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
++ } else {
++ PPC_LI32(b2p[TMP_REG_1], imm);
++ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
+--
+2.33.0
+
--- /dev/null
+From 28a03134b59e9a776ebd53162fdb562f723f7a87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Oct 2021 01:55:28 +0530
+Subject: powerpc/bpf ppc32: Do not emit zero extend instruction for 64-bit
+ BPF_END
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+[ Upstream commit 48164fccdff6d5cc11308126c050bd25a329df25 ]
+
+Suppress emitting zero extend instruction for 64-bit BPF_END_FROM_[L|B]E
+operation.
+
+Fixes: 51c66ad849a703 ("powerpc/bpf: Implement extended BPF on PPC32")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/b4e3c3546121315a8e2059b19a1bda84971816e4.1633464148.git.naveen.n.rao@linux.vnet.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/net/bpf_jit_comp32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index ae3a31cb7b7e..c48de048c8ce 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -1103,7 +1103,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ return -EOPNOTSUPP;
+ }
+ if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
+- !insn_is_zext(&insn[i + 1]))
++ !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ }
+
+--
+2.33.0
+
--- /dev/null
+From 991f3e1b4f60262d30cb2dd792669cd311a5d043 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Oct 2021 01:55:26 +0530
+Subject: powerpc/bpf ppc32: Fix ALU32 BPF_ARSH operation
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+[ Upstream commit c9b8da77f22d28348d1f89a6c4d3fec102e9b1c4 ]
+
+Correct the destination register used for ALU32 BPF_ARSH operation.
+
+Fixes: 51c66ad849a703 ("powerpc/bpf: Implement extended BPF on PPC32")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/6d24c1f9e79b6f61f5135eaf2ea1e8bcd4dac87b.1633464148.git.naveen.n.rao@linux.vnet.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/net/bpf_jit_comp32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index beb12cbc8c29..faef4a1598fd 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -623,7 +623,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
+- EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg));
++ EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
+ bpf_set_seen_register(ctx, tmp_reg);
+--
+2.33.0
+
--- /dev/null
+From 82e3935490e27117aad613222f792125159071f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Oct 2021 01:55:29 +0530
+Subject: powerpc/bpf ppc32: Fix BPF_SUB when imm == 0x80000000
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+[ Upstream commit 548b762763b885b81850db676258df47c55dd5f9 ]
+
+Special case handling of the smallest 32-bit negative number for BPF_SUB.
+
+Fixes: 51c66ad849a703 ("powerpc/bpf: Implement extended BPF on PPC32")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/7135360a0cdf70adedbccf9863128b8daef18764.1633464148.git.naveen.n.rao@linux.vnet.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/net/bpf_jit_comp32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index c48de048c8ce..a7759aa8043d 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -355,7 +355,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
+ }
+- if (imm >= 0)
++ if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
+ EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
+ else
+ EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
+--
+2.33.0
+
--- /dev/null
+From ee2104c12f7bffb8511e40ac8984040b5b557a8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Oct 2021 01:55:27 +0530
+Subject: powerpc/bpf ppc32: Fix JMP32_JSET_K
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+[ Upstream commit e8278d44443207bb6609c7b064073f353e6f4978 ]
+
+'andi' only takes an unsigned 16-bit value. Correct the imm range used
+when emitting andi.
+
+Fixes: 51c66ad849a703 ("powerpc/bpf: Implement extended BPF on PPC32")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/b94489f52831305ec15aca4dd04a3527236be7e8.1633464148.git.naveen.n.rao@linux.vnet.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/net/bpf_jit_comp32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index faef4a1598fd..ae3a31cb7b7e 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -1073,7 +1073,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ break;
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ /* andi does not sign-extend the immediate */
+- if (imm >= -32768 && imm < 32768) {
++ if (imm >= 0 && imm < 32768) {
+ /* PPC_ANDI is _only/always_ dot-form */
+ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
+ } else {
+--
+2.33.0
+
--- /dev/null
+From 71aa45882996b41b2cfeb6a82192f968aa2f1acd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Sep 2021 13:44:54 +1000
+Subject: powerpc/iommu: Report the correct most efficient DMA mask for PCI
+ devices
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+[ Upstream commit 23c216b335d1fbd716076e8263b54a714ea3cf0e ]
+
+According to dma-api.rst, the dma_get_required_mask() helper should return
+"the mask that the platform requires to operate efficiently". Which in
+the case of PPC64 means the bypass mask and not a mask from an IOMMU table
+which is shorter and slower to use due to map/unmap operations (especially
+expensive on "pseries").
+
+However the existing implementation ignores the possibility of bypassing
+and returns the IOMMU table mask on the pseries platform which makes some
+drivers (mpt3sas is one example) choose 32bit DMA even though bypass is
+supported. The powernv platform sort of handles it by having a bigger
+default window with a mask >=40 but it only works as drivers choose
+63/64bit if the required mask is >32 which is rather pointless.
+
+This reintroduces the bypass capability check to let drivers make
+a better choice of the DMA mask.
+
+Fixes: f1565c24b596 ("powerpc: use the generic dma_ops_bypass mode")
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210930034454.95794-1-aik@ozlabs.ru
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/dma-iommu.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
+index 111249fd619d..038ce8d9061d 100644
+--- a/arch/powerpc/kernel/dma-iommu.c
++++ b/arch/powerpc/kernel/dma-iommu.c
+@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
+ struct iommu_table *tbl = get_iommu_table_base(dev);
+ u64 mask;
+
++ if (dev_is_pci(dev)) {
++ u64 bypass_mask = dma_direct_get_required_mask(dev);
++
++ if (dma_iommu_dma_supported(dev, bypass_mask)) {
++ dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
++ return bypass_mask;
++ }
++ }
++
+ if (!tbl)
+ return 0;
+
+--
+2.33.0
+
--- /dev/null
+From 9d8f64c8b2a91cd43d0f6d0c35a3540c04d47868 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Oct 2021 00:56:39 +1000
+Subject: powerpc/traps: do not enable irqs in _exception
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+[ Upstream commit d0afd44c05f8f4e4c91487c02d43c87a31552462 ]
+
+_exception can be called by machine check handlers when the MCE hits
+user code (e.g., pseries and powernv). This will enable local irqs,
+which is a dicey thing to do in NMI or hard irq context.
+
+This seemed to work out okay because a userspace MCE can basically be
+treated like a synchronous interrupt (after async / imprecise MCEs are
+filtered out). Since NMI and hard irq handlers have started growing
+nmi_enter / irq_enter, and more irq state sanity checks, this has
+started to cause problems (or at least trigger warnings).
+
+The Fixes tag points to the commit which introduced this rather than
+trying to work out exactly which commit was the first that could
+possibly cause a problem, because that may be difficult to prove.
+
+Fixes: 9f2f79e3a3c1 ("powerpc: Disable interrupts in 64-bit kernel FP and vector faults")
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20211004145642.1331214-3-npiggin@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/traps.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index d56254f05e17..4ac85ab15ad7 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -341,10 +341,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
+ return false;
+ }
+
+- show_signal_msg(signr, regs, code, addr);
++ /*
++ * Must not enable interrupts even for user-mode exception, because
++ * this can be called from machine check, which may be a NMI or IRQ
++ * which don't like interrupts being enabled. Could check for
++ * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
++ * reason why _exception() should enable irqs for an exception handler,
++ * the handlers themselves do that directly.
++ */
+
+- if (arch_irqs_disabled())
+- interrupt_cond_local_irq_enable(regs);
++ show_signal_msg(signr, regs, code, addr);
+
+ current->thread.trap_nr = code;
+
+--
+2.33.0
+
--- /dev/null
+From a96c2a3b3e8d8ae2df1e1828f8add9729b68bb08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Sep 2021 22:03:26 +0530
+Subject: pseries/eeh: Fix the kdump kernel crash during eeh_pseries_init
+
+From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
+
+[ Upstream commit eb8257a12192f43ffd41bd90932c39dade958042 ]
+
+On pseries LPAR when an empty slot is assigned to partition OR in single
+LPAR mode, kdump kernel crashes during issuing PHB reset.
+
+In the kdump scenario, we traverse all PHBs and issue reset using the
+pe_config_addr of the first child device present under each PHB. However
+the code assumes that none of the PHB slots can be empty and uses
+list_first_entry() to get the first child device under the PHB. Since
+list_first_entry() expects the list to be non-empty, it returns an
+invalid pci_dn entry and ends up accessing NULL phb pointer under
+pci_dn->phb causing kdump kernel crash.
+
+This patch fixes the below kdump kernel crash by skipping empty slots:
+
+ audit: initializing netlink subsys (disabled)
+ thermal_sys: Registered thermal governor 'fair_share'
+ thermal_sys: Registered thermal governor 'step_wise'
+ cpuidle: using governor menu
+ pstore: Registered nvram as persistent store backend
+ Issue PHB reset ...
+ audit: type=2000 audit(1631267818.000:1): state=initialized audit_enabled=0 res=1
+ BUG: Kernel NULL pointer dereference on read at 0x00000268
+ Faulting instruction address: 0xc000000008101fb0
+ Oops: Kernel access of bad area, sig: 7 [#1]
+ LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA pSeries
+ Modules linked in:
+ CPU: 7 PID: 1 Comm: swapper/7 Not tainted 5.14.0 #1
+ NIP: c000000008101fb0 LR: c000000009284ccc CTR: c000000008029d70
+ REGS: c00000001161b840 TRAP: 0300 Not tainted (5.14.0)
+ MSR: 8000000002009033 <SF,VEC,EE,ME,IR,DR,RI,LE> CR: 28000224 XER: 20040002
+ CFAR: c000000008101f0c DAR: 0000000000000268 DSISR: 00080000 IRQMASK: 0
+ ...
+ NIP pseries_eeh_get_pe_config_addr+0x100/0x1b0
+ LR __machine_initcall_pseries_eeh_pseries_init+0x2cc/0x350
+ Call Trace:
+ 0xc00000001161bb80 (unreliable)
+ __machine_initcall_pseries_eeh_pseries_init+0x2cc/0x350
+ do_one_initcall+0x60/0x2d0
+ kernel_init_freeable+0x350/0x3f8
+ kernel_init+0x3c/0x17c
+ ret_from_kernel_thread+0x5c/0x64
+
+Fixes: 5a090f7c363fd ("powerpc/pseries: PCIE PHB reset")
+Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
+[mpe: Tweak wording and trim oops]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/163215558252.413351.8600189949820258982.stgit@jupiter
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/platforms/pseries/eeh_pseries.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index bc15200852b7..09fafcf2d3a0 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -867,6 +867,10 @@ static int __init eeh_pseries_init(void)
+ if (is_kdump_kernel() || reset_devices) {
+ pr_info("Issue PHB reset ...\n");
+ list_for_each_entry(phb, &hose_list, list_node) {
++ // Skip if the slot is empty
++ if (list_empty(&PCI_DN(phb->dn)->child_list))
++ continue;
++
+ pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
+ config_addr = pseries_eeh_get_pe_config_addr(pdn);
+
+--
+2.33.0
+
--- /dev/null
+From bd218da2e057f703733b20e0fc6aac634859d12c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Aug 2021 22:52:45 -0700
+Subject: RISC-V: Fix VDSO build for !MMU
+
+From: Palmer Dabbelt <palmerdabbelt@google.com>
+
+[ Upstream commit a290f510a178830a01bfc06e66a54bbe4ece5d2a ]
+
+We don't have a VDSO for the !MMU configurations, so don't try to build
+one.
+
+Fixes: fde9c59aebaf ("riscv: explicitly use symbol offsets for VDSO")
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/Makefile | 2 ++
+ arch/riscv/include/asm/vdso.h | 9 +++++++++
+ 2 files changed, 11 insertions(+)
+
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index e026b2d0a5a4..83ee0e71204c 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -108,9 +108,11 @@ PHONY += vdso_install
+ vdso_install:
+ $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+
++ifeq ($(CONFIG_MMU),y)
+ prepare: vdso_prepare
+ vdso_prepare: prepare0
+ $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
++endif
+
+ ifneq ($(CONFIG_XIP_KERNEL),y)
+ ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
+diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
+index d8d003c2b5a3..893e47195e30 100644
+--- a/arch/riscv/include/asm/vdso.h
++++ b/arch/riscv/include/asm/vdso.h
+@@ -8,6 +8,13 @@
+ #ifndef _ASM_RISCV_VDSO_H
+ #define _ASM_RISCV_VDSO_H
+
++
++/*
++ * All systems with an MMU have a VDSO, but systems without an MMU don't
++ * support shared libraries and therefor don't have one.
++ */
++#ifdef CONFIG_MMU
++
+ #include <linux/types.h>
+ #include <generated/vdso-offsets.h>
+
+@@ -19,6 +26,8 @@ struct vdso_data {
+ #define VDSO_SYMBOL(base, name) \
+ (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
+
++#endif /* CONFIG_MMU */
++
+ asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+
+ #endif /* _ASM_RISCV_VDSO_H */
+--
+2.33.0
+
--- /dev/null
+From fd6fee7bd3fe88dcb9c73421f46b7b477406a2fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 2 Oct 2021 17:21:20 -0700
+Subject: RISC-V: Include clone3() on rv32
+
+From: Palmer Dabbelt <palmerdabbelt@google.com>
+
+[ Upstream commit 59a4e0d5511ba61353ea9a4efdb1b86c23ecf134 ]
+
+As far as I can tell this should be enabled on rv32 as well, I'm not
+sure why it's rv64-only. checksyscalls is complaining about our lack of
+clone3() on rv32.
+
+Fixes: 56ac5e213933 ("riscv: enable sys_clone3 syscall for rv64")
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/uapi/asm/unistd.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
+index 4b989ae15d59..8062996c2dfd 100644
+--- a/arch/riscv/include/uapi/asm/unistd.h
++++ b/arch/riscv/include/uapi/asm/unistd.h
+@@ -18,9 +18,10 @@
+ #ifdef __LP64__
+ #define __ARCH_WANT_NEW_STAT
+ #define __ARCH_WANT_SET_GET_RLIMIT
+-#define __ARCH_WANT_SYS_CLONE3
+ #endif /* __LP64__ */
+
++#define __ARCH_WANT_SYS_CLONE3
++
+ #include <asm-generic/unistd.h>
+
+ /*
+--
+2.33.0
+
--- /dev/null
+From 3348445183d4025768a5a5f243071406f11c1444 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Aug 2021 17:32:14 +0000
+Subject: riscv: explicitly use symbol offsets for VDSO
+
+From: Saleem Abdulrasool <abdulras@google.com>
+
+[ Upstream commit fde9c59aebafb91caeed816cc510b56f14aa63ae ]
+
+The current implementation of the `__rt_sigaction` reference computed an
+absolute offset relative to the mapped base of the VDSO. While this can
+be handled in the medlow model, the medany model cannot handle this as
+it is meant to be position independent. The current implementation
+relied on the BFD linker relaxing the PC-relative relocation into an
+absolute relocation as it was a near-zero address allowing it to be
+referenced relative to `zero`.
+
+We now extract the offsets and create a generated header allowing the
+build with LLVM and lld to succeed as we no longer depend on the linker
+rewriting address references near zero. This change was largely
+modelled after the ARM64 target which does something similar.
+
+Signed-off-by: Saleem Abdulrasool <abdulras@google.com>
+Tested-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/Makefile | 4 ++++
+ arch/riscv/include/asm/vdso.h | 14 ++----------
+ arch/riscv/kernel/vdso/Makefile | 25 ++++++++++------------
+ arch/riscv/kernel/vdso/gen_vdso_offsets.sh | 5 +++++
+ arch/riscv/kernel/vdso/so2s.sh | 6 ------
+ 5 files changed, 22 insertions(+), 32 deletions(-)
+ create mode 100755 arch/riscv/kernel/vdso/gen_vdso_offsets.sh
+ delete mode 100755 arch/riscv/kernel/vdso/so2s.sh
+
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index bc74afdbf31e..e026b2d0a5a4 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -108,6 +108,10 @@ PHONY += vdso_install
+ vdso_install:
+ $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+
++prepare: vdso_prepare
++vdso_prepare: prepare0
++ $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
++
+ ifneq ($(CONFIG_XIP_KERNEL),y)
+ ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
+ KBUILD_IMAGE := $(boot)/loader.bin
+diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
+index 1453a2f563bc..d8d003c2b5a3 100644
+--- a/arch/riscv/include/asm/vdso.h
++++ b/arch/riscv/include/asm/vdso.h
+@@ -9,25 +9,15 @@
+ #define _ASM_RISCV_VDSO_H
+
+ #include <linux/types.h>
++#include <generated/vdso-offsets.h>
+
+ #ifndef CONFIG_GENERIC_TIME_VSYSCALL
+ struct vdso_data {
+ };
+ #endif
+
+-/*
+- * The VDSO symbols are mapped into Linux so we can just use regular symbol
+- * addressing to get their offsets in userspace. The symbols are mapped at an
+- * offset of 0, but since the linker must support setting weak undefined
+- * symbols to the absolute address 0 it also happens to support other low
+- * addresses even when the code model suggests those low addresses would not
+- * otherwise be availiable.
+- */
+ #define VDSO_SYMBOL(base, name) \
+-({ \
+- extern const char __vdso_##name[]; \
+- (void __user *)((unsigned long)(base) + __vdso_##name); \
+-})
++ (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
+
+ asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index 24d936c147cd..f8cb9144a284 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -23,10 +23,10 @@ ifneq ($(c-gettimeofday-y),)
+ endif
+
+ # Build rules
+-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S
++targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds
+ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+-obj-y += vdso.o vdso-syms.o
++obj-y += vdso.o
+ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+ # Disable -pg to prevent insert call site
+@@ -43,20 +43,22 @@ $(obj)/vdso.o: $(obj)/vdso.so
+ # link rule for the .so file, .lds has to be first
+ $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+-LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
++LDFLAGS_vdso.so.dbg = -shared -S -soname=linux-vdso.so.1 \
+ --build-id=sha1 --hash-style=both --eh-frame-hdr
+
+-# We also create a special relocatable object that should mirror the symbol
+-# table and layout of the linked DSO. With ld --just-symbols we can then
+-# refer to these symbols in the kernel code rather than hand-coded addresses.
+-$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
+- $(call if_changed,so2s)
+-
+ # strip rule for the .so file
+ $(obj)/%.so: OBJCOPYFLAGS := -S
+ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
++# Generate VDSO offsets using helper script
++gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
++quiet_cmd_vdsosym = VDSOSYM $@
++ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
++
++include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
++ $(call if_changed,vdsosym)
++
+ # actual build commands
+ # The DSO images are built using a special linker script
+ # Make sure only to export the intended __vdso_xxx symbol offsets.
+@@ -65,11 +67,6 @@ quiet_cmd_vdsold = VDSOLD $@
+ $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+ rm $@.tmp
+
+-# Extracts symbol offsets from the VDSO, converting them into an assembly file
+-# that contains the same symbols at the same offsets.
+-quiet_cmd_so2s = SO2S $@
+- cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
+-
+ # install commands for the unstripped file
+ quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+diff --git a/arch/riscv/kernel/vdso/gen_vdso_offsets.sh b/arch/riscv/kernel/vdso/gen_vdso_offsets.sh
+new file mode 100755
+index 000000000000..c2e5613f3495
+--- /dev/null
++++ b/arch/riscv/kernel/vdso/gen_vdso_offsets.sh
+@@ -0,0 +1,5 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++
++LC_ALL=C
++sed -n -e 's/^[0]\+\(0[0-9a-fA-F]*\) . \(__vdso_[a-zA-Z0-9_]*\)$/\#define \2_offset\t0x\1/p'
+diff --git a/arch/riscv/kernel/vdso/so2s.sh b/arch/riscv/kernel/vdso/so2s.sh
+deleted file mode 100755
+index e64cb6d9440e..000000000000
+--- a/arch/riscv/kernel/vdso/so2s.sh
++++ /dev/null
+@@ -1,6 +0,0 @@
+-#!/bin/sh
+-# SPDX-License-Identifier: GPL-2.0+
+-# Copyright 2020 Palmer Dabbelt <palmerdabbelt@google.com>
+-
+-sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \
+-| grep '^\.'
+--
+2.33.0
+
--- /dev/null
+From 0872ecce7cf6c72c4122df507b3206a69681accd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Sep 2021 02:46:21 +0000
+Subject: riscv/vdso: make arch_setup_additional_pages wait for mmap_sem for
+ write killable
+
+From: Tong Tiangen <tongtiangen@huawei.com>
+
+[ Upstream commit 8bb0ab3ae7a4dbe6cf32deb830cf2bdbf5736867 ]
+
+The riscv architecture relies on mmap_sem for write in its
+arch_setup_additional_pages. If the waiting task gets killed by the oom
+killer it would block oom_reaper from asynchronous address space reclaim
+and reduce the chances of timely OOM resolving. Wait for the lock in
+the killable mode and return with EINTR if the task got killed while
+waiting.
+
+Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
+Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Fixes: 76d2a0493a17 ("RISC-V: Init and Halt Code")
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/vdso.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
+index e7bd92d8749b..b70956d80408 100644
+--- a/arch/riscv/kernel/vdso.c
++++ b/arch/riscv/kernel/vdso.c
+@@ -77,7 +77,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
+
+ vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;
+
+- mmap_write_lock(mm);
++ if (mmap_write_lock_killable(mm))
++ return -EINTR;
++
+ vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ ret = vdso_base;
+--
+2.33.0
+
--- /dev/null
+From 3d03c554529b382d2765b4249df97f6ebaf4110b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Sep 2021 02:46:20 +0000
+Subject: riscv/vdso: Move vdso data page up front
+
+From: Tong Tiangen <tongtiangen@huawei.com>
+
+[ Upstream commit 78a743cd82a35ca0724179fc22834f06a2151fc2 ]
+
+As commit 601255ae3c98 ("arm64: vdso: move data page before code pages"), the
+same issue exists on riscv, testcase is shown below, make sure that vdso.so is
+bigger than page size,
+
+ struct timespec tp;
+ clock_gettime(5, &tp);
+ printf("tv_sec: %ld, tv_nsec: %ld\n", tp.tv_sec, tp.tv_nsec);
+
+without this patch, test result : tv_sec: 0, tv_nsec: 0
+ with this patch, test result : tv_sec: 1629271537, tv_nsec: 748000000
+
+Move the vdso data page in front of the VDSO area to fix the issue.
+
+Fixes: ad5d1122b82fb ("riscv: use vDSO common flow to reduce the latency of the time-related functions")
+Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
+Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/vdso.h | 2 ++
+ arch/riscv/kernel/vdso.c | 44 ++++++++++++++++++-------------
+ arch/riscv/kernel/vdso/vdso.lds.S | 3 ++-
+ 3 files changed, 30 insertions(+), 19 deletions(-)
+
+diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
+index a4a979c89ea0..208e31bc5d1c 100644
+--- a/arch/riscv/include/asm/vdso.h
++++ b/arch/riscv/include/asm/vdso.h
+@@ -22,6 +22,8 @@
+ */
+ #ifdef CONFIG_MMU
+
++#define __VVAR_PAGES 1
++
+ #ifndef __ASSEMBLY__
+ #include <generated/vdso-offsets.h>
+
+diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
+index 72e93d218335..e7bd92d8749b 100644
+--- a/arch/riscv/kernel/vdso.c
++++ b/arch/riscv/kernel/vdso.c
+@@ -23,6 +23,13 @@ struct vdso_data {
+
+ extern char vdso_start[], vdso_end[];
+
++enum vvar_pages {
++ VVAR_DATA_PAGE_OFFSET,
++ VVAR_NR_PAGES,
++};
++
++#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
++
+ static unsigned int vdso_pages __ro_after_init;
+ static struct page **vdso_pagelist __ro_after_init;
+
+@@ -41,7 +48,7 @@ static int __init vdso_init(void)
+
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+ vdso_pagelist =
+- kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
++ kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(vdso_pagelist == NULL)) {
+ pr_err("vdso: pagelist allocation failed\n");
+ return -ENOMEM;
+@@ -66,7 +73,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
+ unsigned long vdso_base, vdso_len;
+ int ret;
+
+- vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
++ BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
++
++ vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;
+
+ mmap_write_lock(mm);
+ vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
+@@ -75,29 +84,28 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
+ goto end;
+ }
+
+- /*
+- * Put vDSO base into mm struct. We need to do this before calling
+- * install_special_mapping or the perf counter mmap tracking code
+- * will fail to recognise it as a vDSO (since arch_vma_name fails).
+- */
+- mm->context.vdso = (void *)vdso_base;
++ mm->context.vdso = NULL;
++ ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
++ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
++ if (unlikely(ret))
++ goto end;
+
+ ret =
+- install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
++ install_special_mapping(mm, vdso_base + VVAR_SIZE,
++ vdso_pages << PAGE_SHIFT,
+ (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
+ vdso_pagelist);
+
+- if (unlikely(ret)) {
+- mm->context.vdso = NULL;
++ if (unlikely(ret))
+ goto end;
+- }
+
+- vdso_base += (vdso_pages << PAGE_SHIFT);
+- ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+- (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
++ /*
++ * Put vDSO base into mm struct. We need to do this before calling
++ * install_special_mapping or the perf counter mmap tracking code
++ * will fail to recognise it as a vDSO (since arch_vma_name fails).
++ */
++ mm->context.vdso = (void *)vdso_base + VVAR_SIZE;
+
+- if (unlikely(ret))
+- mm->context.vdso = NULL;
+ end:
+ mmap_write_unlock(mm);
+ return ret;
+@@ -108,7 +116,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+ if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
+ return "[vdso]";
+ if (vma->vm_mm && (vma->vm_start ==
+- (long)vma->vm_mm->context.vdso + PAGE_SIZE))
++ (long)vma->vm_mm->context.vdso - VVAR_SIZE))
+ return "[vdso_data]";
+ return NULL;
+ }
+diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
+index e6f558bca71b..e9111f700af0 100644
+--- a/arch/riscv/kernel/vdso/vdso.lds.S
++++ b/arch/riscv/kernel/vdso/vdso.lds.S
+@@ -3,12 +3,13 @@
+ * Copyright (C) 2012 Regents of the University of California
+ */
+ #include <asm/page.h>
++#include <asm/vdso.h>
+
+ OUTPUT_ARCH(riscv)
+
+ SECTIONS
+ {
+- PROVIDE(_vdso_data = . + PAGE_SIZE);
++ PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+--
+2.33.0
+
--- /dev/null
+From 02b5267f9ac179c2e44faa11d4b670de44d0a2e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Sep 2021 02:46:19 +0000
+Subject: riscv/vdso: Refactor asm/vdso.h
+
+From: Tong Tiangen <tongtiangen@huawei.com>
+
+[ Upstream commit bb4a23c994aebcd96c567a0be8e964d516bd4a61 ]
+
+The asm/vdso.h will be included in vdso.lds.S in the next patch, the
+following cleanup is needed to avoid syntax error:
+
+ 1.the declaration of sys_riscv_flush_icache() is moved into asm/syscall.h.
+ 2.the definition of struct vdso_data is moved into kernel/vdso.c.
+ 3.the definition of VDSO_SYMBOL is placed under "#ifndef __ASSEMBLY__".
+
+Also remove the redundant linux/types.h include.
+
+Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
+Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/syscall.h | 1 +
+ arch/riscv/include/asm/vdso.h | 16 ++++++++++------
+ arch/riscv/kernel/syscall_table.c | 1 -
+ arch/riscv/kernel/vdso.c | 5 ++++-
+ 4 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
+index b933b1583c9f..34fbb3ea21d5 100644
+--- a/arch/riscv/include/asm/syscall.h
++++ b/arch/riscv/include/asm/syscall.h
+@@ -82,4 +82,5 @@ static inline int syscall_get_arch(struct task_struct *task)
+ #endif
+ }
+
++asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+ #endif /* _ASM_RISCV_SYSCALL_H */
+diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
+index 893e47195e30..a4a979c89ea0 100644
+--- a/arch/riscv/include/asm/vdso.h
++++ b/arch/riscv/include/asm/vdso.h
+@@ -16,18 +16,22 @@
+ #ifdef CONFIG_MMU
+
+ #include <linux/types.h>
+-#include <generated/vdso-offsets.h>
++/*
++ * All systems with an MMU have a VDSO, but systems without an MMU don't
++ * support shared libraries and therefor don't have one.
++ */
++#ifdef CONFIG_MMU
+
+-#ifndef CONFIG_GENERIC_TIME_VSYSCALL
+-struct vdso_data {
+-};
+-#endif
++#ifndef __ASSEMBLY__
++#include <generated/vdso-offsets.h>
+
+ #define VDSO_SYMBOL(base, name) \
+ (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
+
+ #endif /* CONFIG_MMU */
+
+-asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
++#endif /* !__ASSEMBLY__ */
++
++#endif /* CONFIG_MMU */
+
+ #endif /* _ASM_RISCV_VDSO_H */
+diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
+index a63c667c27b3..44b1420a2270 100644
+--- a/arch/riscv/kernel/syscall_table.c
++++ b/arch/riscv/kernel/syscall_table.c
+@@ -7,7 +7,6 @@
+ #include <linux/linkage.h>
+ #include <linux/syscalls.h>
+ #include <asm-generic/syscalls.h>
+-#include <asm/vdso.h>
+ #include <asm/syscall.h>
+
+ #undef __SYSCALL
+diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
+index 25a3b8849599..72e93d218335 100644
+--- a/arch/riscv/kernel/vdso.c
++++ b/arch/riscv/kernel/vdso.c
+@@ -12,10 +12,13 @@
+ #include <linux/binfmts.h>
+ #include <linux/err.h>
+ #include <asm/page.h>
++#include <asm/vdso.h>
++
+ #ifdef CONFIG_GENERIC_TIME_VSYSCALL
+ #include <vdso/datapage.h>
+ #else
+-#include <asm/vdso.h>
++struct vdso_data {
++};
+ #endif
+
+ extern char vdso_start[], vdso_end[];
+--
+2.33.0
+
--- /dev/null
+From b4acaa8e2a1a8b7dd174b447065a7f190908c8df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Oct 2021 13:38:51 +0300
+Subject: scsi: elx: efct: Delete stray unlock statement
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit a013c71c6315d6e9d6364d12251b98c75c9a2861 ]
+
+It's not holding the lock at this stage and the IRQ "flags" are not correct
+so it would restore something bogus. Delete the unlock statement.
+
+Link: https://lore.kernel.org/r/20211004103851.GE25015@kili
+Fixes: 3e6414003bf9 ("scsi: elx: efct: SCSI I/O handling routines")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/elx/efct/efct_scsi.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/elx/efct/efct_scsi.c b/drivers/scsi/elx/efct/efct_scsi.c
+index 40fb3a724c76..cf2e41dd354c 100644
+--- a/drivers/scsi/elx/efct/efct_scsi.c
++++ b/drivers/scsi/elx/efct/efct_scsi.c
+@@ -32,7 +32,7 @@ efct_scsi_io_alloc(struct efct_node *node)
+ struct efct *efct;
+ struct efct_xport *xport;
+ struct efct_io *io;
+- unsigned long flags = 0;
++ unsigned long flags;
+
+ efct = node->efct;
+
+@@ -44,7 +44,6 @@ efct_scsi_io_alloc(struct efct_node *node)
+ if (!io) {
+ efc_log_err(efct, "IO alloc Failed\n");
+ atomic_add_return(1, &xport->io_alloc_failed_count);
+- spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ return NULL;
+ }
+
+--
+2.33.0
+
--- /dev/null
+From 79326481f8cc247a09d2aeeb652df34872b350f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Oct 2021 16:06:08 -0500
+Subject: scsi: iscsi: Fix iscsi_task use after free
+
+From: Mike Christie <michael.christie@oracle.com>
+
+[ Upstream commit 258aad75c62146453d03028a44f2f1590d58e1f6 ]
+
+Commit d39df158518c ("scsi: iscsi: Have abort handler get ref to conn")
+added iscsi_get_conn()/iscsi_put_conn() calls during abort handling but
+then also changed the handling of the case where we detect an already
+completed task where we now end up doing a goto to the common put/cleanup
+code. This results in a iscsi_task use after free, because the common
+cleanup code will do a put on the iscsi_task.
+
+This reverts the goto and moves the iscsi_get_conn() to after we've checked
+if the iscsi_task is valid.
+
+Link: https://lore.kernel.org/r/20211004210608.9962-1-michael.christie@oracle.com
+Fixes: d39df158518c ("scsi: iscsi: Have abort handler get ref to conn")
+Signed-off-by: Mike Christie <michael.christie@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/libiscsi.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 4683c183e9d4..5bc91d34df63 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -2281,11 +2281,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ return FAILED;
+ }
+
+- conn = session->leadconn;
+- iscsi_get_conn(conn->cls_conn);
+- conn->eh_abort_cnt++;
+- age = session->age;
+-
+ spin_lock(&session->back_lock);
+ task = (struct iscsi_task *)sc->SCp.ptr;
+ if (!task || !task->sc) {
+@@ -2293,8 +2288,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
+
+ spin_unlock(&session->back_lock);
+- goto success;
++ spin_unlock_bh(&session->frwd_lock);
++ mutex_unlock(&session->eh_mutex);
++ return SUCCESS;
+ }
++
++ conn = session->leadconn;
++ iscsi_get_conn(conn->cls_conn);
++ conn->eh_abort_cnt++;
++ age = session->age;
++
+ ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
+ __iscsi_get_task(task);
+ spin_unlock(&session->back_lock);
+--
+2.33.0
+
i40e-fix-freeing-of-uninitialized-misc-irq-vector.patch
iavf-fix-double-unlock-of-crit_lock.patch
net-prefer-socket-bound-to-interface-when-not-in-vrf.patch
+powerpc-iommu-report-the-correct-most-efficient-dma-.patch
+i2c-acpi-fix-resource-leak-in-reconfiguration-device.patch
+i2c-mediatek-add-offset_ext_conf-setting-back.patch
+riscv-explicitly-use-symbol-offsets-for-vdso.patch
+risc-v-fix-vdso-build-for-mmu.patch
+riscv-vdso-refactor-asm-vdso.h.patch
+riscv-vdso-move-vdso-data-page-up-front.patch
+riscv-vdso-make-arch_setup_additional_pages-wait-for.patch
+bpf-s390-fix-potential-memory-leak-about-jit_data.patch
+i2c-mlxcpld-fix-criteria-for-frequency-setting.patch
+i2c-mlxcpld-modify-register-setting-for-400khz-frequ.patch
+risc-v-include-clone3-on-rv32.patch
+scsi-elx-efct-delete-stray-unlock-statement.patch
+scsi-iscsi-fix-iscsi_task-use-after-free.patch
+objtool-remove-reloc-symbol-type-checks-in-get_alt_e.patch
+objtool-make-.altinstructions-section-entry-size-con.patch
+powerpc-bpf-fix-bpf_mod-when-imm-1.patch
+powerpc-bpf-fix-bpf_sub-when-imm-0x80000000.patch
+powerpc-bpf-ppc32-fix-alu32-bpf_arsh-operation.patch
+powerpc-bpf-ppc32-fix-jmp32_jset_k.patch
+powerpc-bpf-ppc32-do-not-emit-zero-extend-instructio.patch
+powerpc-bpf-ppc32-fix-bpf_sub-when-imm-0x80000000.patch
+powerpc-64s-fix-program-check-interrupt-emergency-st.patch
+powerpc-traps-do-not-enable-irqs-in-_exception.patch
+powerpc-64s-fix-unrecoverable-mce-calling-async-hand.patch
+powerpc-32s-fix-kuap_kernel_restore.patch
+pseries-eeh-fix-the-kdump-kernel-crash-during-eeh_ps.patch