]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 16 Oct 2023 08:02:22 +0000 (10:02 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 16 Oct 2023 08:02:22 +0000 (10:02 +0200)
added patches:
arm64-allow-kprobes-on-el0-handlers.patch
arm64-armv8_deprecated-fix-unused-function-error.patch
arm64-armv8_deprecated-fold-ops-into-insn_emulation.patch
arm64-armv8_deprecated-move-aarch32-helper-earlier.patch
arm64-armv8_deprecated-move-emulation-functions.patch
arm64-armv8_deprecated-rework-deprected-instruction-handling.patch
arm64-consistently-pass-esr_elx-to-die.patch
arm64-die-pass-err-as-long.patch
arm64-factor-insn-read-out-of-call_undef_hook.patch
arm64-factor-out-el1-ssbs-emulation-hook.patch
arm64-report-el1-undefs-better.patch
arm64-rework-bti-exception-handling.patch
arm64-rework-el0-mrs-emulation.patch
arm64-rework-fpac-exception-handling.patch
arm64-split-el0-el1-undef-handlers.patch

16 files changed:
queue-5.10/arm64-allow-kprobes-on-el0-handlers.patch [new file with mode: 0644]
queue-5.10/arm64-armv8_deprecated-fix-unused-function-error.patch [new file with mode: 0644]
queue-5.10/arm64-armv8_deprecated-fold-ops-into-insn_emulation.patch [new file with mode: 0644]
queue-5.10/arm64-armv8_deprecated-move-aarch32-helper-earlier.patch [new file with mode: 0644]
queue-5.10/arm64-armv8_deprecated-move-emulation-functions.patch [new file with mode: 0644]
queue-5.10/arm64-armv8_deprecated-rework-deprected-instruction-handling.patch [new file with mode: 0644]
queue-5.10/arm64-consistently-pass-esr_elx-to-die.patch [new file with mode: 0644]
queue-5.10/arm64-die-pass-err-as-long.patch [new file with mode: 0644]
queue-5.10/arm64-factor-insn-read-out-of-call_undef_hook.patch [new file with mode: 0644]
queue-5.10/arm64-factor-out-el1-ssbs-emulation-hook.patch [new file with mode: 0644]
queue-5.10/arm64-report-el1-undefs-better.patch [new file with mode: 0644]
queue-5.10/arm64-rework-bti-exception-handling.patch [new file with mode: 0644]
queue-5.10/arm64-rework-el0-mrs-emulation.patch [new file with mode: 0644]
queue-5.10/arm64-rework-fpac-exception-handling.patch [new file with mode: 0644]
queue-5.10/arm64-split-el0-el1-undef-handlers.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arm64-allow-kprobes-on-el0-handlers.patch b/queue-5.10/arm64-allow-kprobes-on-el0-handlers.patch
new file mode 100644 (file)
index 0000000..5692cd0
--- /dev/null
@@ -0,0 +1,104 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:07:04 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:36 +0000
+Subject: arm64: allow kprobes on EL0 handlers
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-7-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit b3a0c010e900a9f89dcd99f10bd8f7538d21b0a9 upstream.
+
+Currently do_sysinstr() and do_cp15instr() are marked with
+NOKPROBE_SYMBOL(). However, these are only called for exceptions taken
+from EL0, and there is no risk of recursion in kprobes, so this is not
+necessary.
+
+Remove the NOKPROBE_SYMBOL() annotation, and rename the two functions to
+more clearly indicate that these are solely for exceptions taken from
+EL0, better matching the names used by the lower level entry points in
+entry-common.c.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Joey Gouly <joey.gouly@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20221019144123.612388-2-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/exception.h |    4 ++--
+ arch/arm64/kernel/entry-common.c   |    4 ++--
+ arch/arm64/kernel/traps.c          |    6 ++----
+ 3 files changed, 6 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/include/asm/exception.h
++++ b/arch/arm64/include/asm/exception.h
+@@ -42,10 +42,10 @@ void do_debug_exception(unsigned long ad
+ void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
+ void do_sve_acc(unsigned int esr, struct pt_regs *regs);
+ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
+-void do_sysinstr(unsigned int esr, struct pt_regs *regs);
++void do_el0_sys(unsigned long esr, struct pt_regs *regs);
+ void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
+-void do_cp15instr(unsigned int esr, struct pt_regs *regs);
++void do_el0_cp15(unsigned long esr, struct pt_regs *regs);
+ void do_el0_svc(struct pt_regs *regs);
+ void do_el0_svc_compat(struct pt_regs *regs);
+ void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
+--- a/arch/arm64/kernel/entry-common.c
++++ b/arch/arm64/kernel/entry-common.c
+@@ -306,7 +306,7 @@ static void noinstr el0_sys(struct pt_re
+ {
+       enter_from_user_mode();
+       local_daif_restore(DAIF_PROCCTX);
+-      do_sysinstr(esr, regs);
++      do_el0_sys(esr, regs);
+ }
+ static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
+@@ -430,7 +430,7 @@ static void noinstr el0_cp15(struct pt_r
+ {
+       enter_from_user_mode();
+       local_daif_restore(DAIF_PROCCTX);
+-      do_cp15instr(esr, regs);
++      do_el0_cp15(esr, regs);
+ }
+ static void noinstr el0_svc_compat(struct pt_regs *regs)
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -650,7 +650,7 @@ static const struct sys64_hook cp15_64_h
+       {},
+ };
+-void do_cp15instr(unsigned int esr, struct pt_regs *regs)
++void do_el0_cp15(unsigned long esr, struct pt_regs *regs)
+ {
+       const struct sys64_hook *hook, *hook_base;
+@@ -688,10 +688,9 @@ void do_cp15instr(unsigned int esr, stru
+        */
+       do_undefinstr(regs, esr);
+ }
+-NOKPROBE_SYMBOL(do_cp15instr);
+ #endif
+-void do_sysinstr(unsigned int esr, struct pt_regs *regs)
++void do_el0_sys(unsigned long esr, struct pt_regs *regs)
+ {
+       const struct sys64_hook *hook;
+@@ -708,7 +707,6 @@ void do_sysinstr(unsigned int esr, struc
+        */
+       do_undefinstr(regs, esr);
+ }
+-NOKPROBE_SYMBOL(do_sysinstr);
+ static const char *esr_class_str[] = {
+       [0 ... ESR_ELx_EC_MAX]          = "UNRECOGNIZED EC",
diff --git a/queue-5.10/arm64-armv8_deprecated-fix-unused-function-error.patch b/queue-5.10/arm64-armv8_deprecated-fix-unused-function-error.patch
new file mode 100644 (file)
index 0000000..c48df4c
--- /dev/null
@@ -0,0 +1,45 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:43 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:45 +0000
+Subject: arm64: armv8_deprecated: fix unused-function error
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-16-ruanjinjie@huawei.com>
+
+From: Ren Zhijie <renzhijie2@huawei.com>
+
+commit 223d3a0d30b6e9f979f5642e430e1753d3e29f89 upstream.
+
+If CONFIG_SWP_EMULATION is not set and
+CONFIG_CP15_BARRIER_EMULATION is not set,
+aarch64-linux-gnu complained about unused-function :
+
+arch/arm64/kernel/armv8_deprecated.c:67:21: error: ‘aarch32_check_condition’ defined but not used [-Werror=unused-function]
+ static unsigned int aarch32_check_condition(u32 opcode, u32 psr)
+                     ^~~~~~~~~~~~~~~~~~~~~~~
+cc1: all warnings being treated as errors
+
+To fix this warning, modify aarch32_check_condition() with __maybe_unused.
+
+Fixes: 0c5f416219da ("arm64: armv8_deprecated: move aarch32 helper earlier")
+Signed-off-by: Ren Zhijie <renzhijie2@huawei.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/r/20221124022429.19024-1-renzhijie2@huawei.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/armv8_deprecated.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -64,7 +64,7 @@ struct insn_emulation {
+ #define       ARM_OPCODE_CONDITION_UNCOND     0xf
+-static unsigned int aarch32_check_condition(u32 opcode, u32 psr)
++static unsigned int __maybe_unused aarch32_check_condition(u32 opcode, u32 psr)
+ {
+       u32 cc_bits  = opcode >> 28;
diff --git a/queue-5.10/arm64-armv8_deprecated-fold-ops-into-insn_emulation.patch b/queue-5.10/arm64-armv8_deprecated-fold-ops-into-insn_emulation.patch
new file mode 100644 (file)
index 0000000..89ec33b
--- /dev/null
@@ -0,0 +1,241 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:39 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:41 +0000
+Subject: arm64: armv8_deprecated: fold ops into insn_emulation
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-12-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit b4453cc8a7ebbd45436a8cd3ffeaa069ceac146f upstream.
+
+The code for emulating deprecated instructions has two related
+structures: struct insn_emulation_ops and struct insn_emulation, where
+each struct insn_emulation_ops is associated 1-1 with a struct
+insn_emulation.
+
+It would be simpler to combine the two into a single structure, removing
+the need for (unconditional) dynamic allocation at boot time, and
+simplifying some runtime pointer chasing.
+
+This patch merges the two structures together.
+
+There should be no functional change as a result of this patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Joey Gouly <joey.gouly@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20221019144123.612388-7-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/armv8_deprecated.c |   76 +++++++++++++++--------------------
+ 1 file changed, 33 insertions(+), 43 deletions(-)
+
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -41,16 +41,12 @@ enum legacy_insn_status {
+       INSN_OBSOLETE,
+ };
+-struct insn_emulation_ops {
+-      const char              *name;
+-      enum legacy_insn_status status;
+-      struct undef_hook       *hooks;
+-      int                     (*set_hw_mode)(bool enable);
+-};
+-
+ struct insn_emulation {
+-      struct list_head node;
+-      struct insn_emulation_ops *ops;
++      const char                      *name;
++      struct list_head                node;
++      enum legacy_insn_status         status;
++      struct undef_hook               *hooks;
++      int                             (*set_hw_mode)(bool enable);
+       int current_mode;
+       int min;
+       int max;
+@@ -61,48 +57,48 @@ static int nr_insn_emulated __initdata;
+ static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
+ static DEFINE_MUTEX(insn_emulation_mutex);
+-static void register_emulation_hooks(struct insn_emulation_ops *ops)
++static void register_emulation_hooks(struct insn_emulation *insn)
+ {
+       struct undef_hook *hook;
+-      BUG_ON(!ops->hooks);
++      BUG_ON(!insn->hooks);
+-      for (hook = ops->hooks; hook->instr_mask; hook++)
++      for (hook = insn->hooks; hook->instr_mask; hook++)
+               register_undef_hook(hook);
+-      pr_notice("Registered %s emulation handler\n", ops->name);
++      pr_notice("Registered %s emulation handler\n", insn->name);
+ }
+-static void remove_emulation_hooks(struct insn_emulation_ops *ops)
++static void remove_emulation_hooks(struct insn_emulation *insn)
+ {
+       struct undef_hook *hook;
+-      BUG_ON(!ops->hooks);
++      BUG_ON(!insn->hooks);
+-      for (hook = ops->hooks; hook->instr_mask; hook++)
++      for (hook = insn->hooks; hook->instr_mask; hook++)
+               unregister_undef_hook(hook);
+-      pr_notice("Removed %s emulation handler\n", ops->name);
++      pr_notice("Removed %s emulation handler\n", insn->name);
+ }
+ static void enable_insn_hw_mode(void *data)
+ {
+       struct insn_emulation *insn = (struct insn_emulation *)data;
+-      if (insn->ops->set_hw_mode)
+-              insn->ops->set_hw_mode(true);
++      if (insn->set_hw_mode)
++              insn->set_hw_mode(true);
+ }
+ static void disable_insn_hw_mode(void *data)
+ {
+       struct insn_emulation *insn = (struct insn_emulation *)data;
+-      if (insn->ops->set_hw_mode)
+-              insn->ops->set_hw_mode(false);
++      if (insn->set_hw_mode)
++              insn->set_hw_mode(false);
+ }
+ /* Run set_hw_mode(mode) on all active CPUs */
+ static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
+ {
+-      if (!insn->ops->set_hw_mode)
++      if (!insn->set_hw_mode)
+               return -EINVAL;
+       if (enable)
+               on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
+@@ -126,9 +122,9 @@ static int run_all_insn_set_hw_mode(unsi
+       raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+       list_for_each_entry(insn, &insn_emulation, node) {
+               bool enable = (insn->current_mode == INSN_HW);
+-              if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
++              if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
+                       pr_warn("CPU[%u] cannot support the emulation of %s",
+-                              cpu, insn->ops->name);
++                              cpu, insn->name);
+                       rc = -EINVAL;
+               }
+       }
+@@ -145,11 +141,11 @@ static int update_insn_emulation_mode(st
+       case INSN_UNDEF: /* Nothing to be done */
+               break;
+       case INSN_EMULATE:
+-              remove_emulation_hooks(insn->ops);
++              remove_emulation_hooks(insn);
+               break;
+       case INSN_HW:
+               if (!run_all_cpu_set_hw_mode(insn, false))
+-                      pr_notice("Disabled %s support\n", insn->ops->name);
++                      pr_notice("Disabled %s support\n", insn->name);
+               break;
+       }
+@@ -157,31 +153,25 @@ static int update_insn_emulation_mode(st
+       case INSN_UNDEF:
+               break;
+       case INSN_EMULATE:
+-              register_emulation_hooks(insn->ops);
++              register_emulation_hooks(insn);
+               break;
+       case INSN_HW:
+               ret = run_all_cpu_set_hw_mode(insn, true);
+               if (!ret)
+-                      pr_notice("Enabled %s support\n", insn->ops->name);
++                      pr_notice("Enabled %s support\n", insn->name);
+               break;
+       }
+       return ret;
+ }
+-static void __init register_insn_emulation(struct insn_emulation_ops *ops)
++static void __init register_insn_emulation(struct insn_emulation *insn)
+ {
+       unsigned long flags;
+-      struct insn_emulation *insn;
+-
+-      insn = kzalloc(sizeof(*insn), GFP_KERNEL);
+-      if (!insn)
+-              return;
+-      insn->ops = ops;
+       insn->min = INSN_UNDEF;
+-      switch (ops->status) {
++      switch (insn->status) {
+       case INSN_DEPRECATED:
+               insn->current_mode = INSN_EMULATE;
+               /* Disable the HW mode if it was turned on at early boot time */
+@@ -247,7 +237,7 @@ static void __init register_insn_emulati
+               sysctl->mode = 0644;
+               sysctl->maxlen = sizeof(int);
+-              sysctl->procname = insn->ops->name;
++              sysctl->procname = insn->name;
+               sysctl->data = &insn->current_mode;
+               sysctl->extra1 = &insn->min;
+               sysctl->extra2 = &insn->max;
+@@ -451,7 +441,7 @@ static struct undef_hook swp_hooks[] = {
+       { }
+ };
+-static struct insn_emulation_ops swp_ops = {
++static struct insn_emulation insn_swp = {
+       .name = "swp",
+       .status = INSN_OBSOLETE,
+       .hooks = swp_hooks,
+@@ -538,7 +528,7 @@ static struct undef_hook cp15_barrier_ho
+       { }
+ };
+-static struct insn_emulation_ops cp15_barrier_ops = {
++static struct insn_emulation insn_cp15_barrier = {
+       .name = "cp15_barrier",
+       .status = INSN_DEPRECATED,
+       .hooks = cp15_barrier_hooks,
+@@ -611,7 +601,7 @@ static struct undef_hook setend_hooks[]
+       {}
+ };
+-static struct insn_emulation_ops setend_ops = {
++static struct insn_emulation insn_setend = {
+       .name = "setend",
+       .status = INSN_DEPRECATED,
+       .hooks = setend_hooks,
+@@ -625,14 +615,14 @@ static struct insn_emulation_ops setend_
+ static int __init armv8_deprecated_init(void)
+ {
+       if (IS_ENABLED(CONFIG_SWP_EMULATION))
+-              register_insn_emulation(&swp_ops);
++              register_insn_emulation(&insn_swp);
+       if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
+-              register_insn_emulation(&cp15_barrier_ops);
++              register_insn_emulation(&insn_cp15_barrier);
+       if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
+               if (system_supports_mixed_endian_el0())
+-                      register_insn_emulation(&setend_ops);
++                      register_insn_emulation(&insn_setend);
+               else
+                       pr_info("setend instruction emulation is not supported on this system\n");
+       }
diff --git a/queue-5.10/arm64-armv8_deprecated-move-aarch32-helper-earlier.patch b/queue-5.10/arm64-armv8_deprecated-move-aarch32-helper-earlier.patch
new file mode 100644 (file)
index 0000000..8ac00ce
--- /dev/null
@@ -0,0 +1,100 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:07:12 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:43 +0000
+Subject: arm64: armv8_deprecated: move aarch32 helper earlier
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-14-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 0c5f416219da3795dc8b33e5bb7865a6b3c4e55c upstream.
+
+Subsequent patches will rework the logic in armv8_deprecated.c.
+
+In preparation for subsequent changes, this patch moves some shared logic
+earlier in the file. This will make subsequent diffs simpler and easier to
+read.
+
+At the same time, drop the `__kprobes` annotation from
+aarch32_check_condition(), as this is only used for traps from compat
+userspace, and has no risk of recursion within kprobes. As this is the
+last kprobes annotation in armve8_deprecated.c, we no longer need to
+include <asm/kprobes.h>.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Joey Gouly <joey.gouly@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20221019144123.612388-9-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/armv8_deprecated.c |   39 +++++++++++++++++------------------
+ 1 file changed, 19 insertions(+), 20 deletions(-)
+
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -17,7 +17,6 @@
+ #include <asm/sysreg.h>
+ #include <asm/system_misc.h>
+ #include <asm/traps.h>
+-#include <asm/kprobes.h>
+ #define CREATE_TRACE_POINTS
+ #include "trace-events-emulation.h"
+@@ -52,6 +51,25 @@ struct insn_emulation {
+       int max;
+ };
++#define ARM_OPCODE_CONDTEST_FAIL   0
++#define ARM_OPCODE_CONDTEST_PASS   1
++#define ARM_OPCODE_CONDTEST_UNCOND 2
++
++#define       ARM_OPCODE_CONDITION_UNCOND     0xf
++
++static unsigned int aarch32_check_condition(u32 opcode, u32 psr)
++{
++      u32 cc_bits  = opcode >> 28;
++
++      if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
++              if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
++                      return ARM_OPCODE_CONDTEST_PASS;
++              else
++                      return ARM_OPCODE_CONDTEST_FAIL;
++      }
++      return ARM_OPCODE_CONDTEST_UNCOND;
++}
++
+ /*
+  *  Implement emulation of the SWP/SWPB instructions using load-exclusive and
+  *  store-exclusive.
+@@ -138,25 +156,6 @@ static int emulate_swpX(unsigned int add
+       return res;
+ }
+-#define ARM_OPCODE_CONDTEST_FAIL   0
+-#define ARM_OPCODE_CONDTEST_PASS   1
+-#define ARM_OPCODE_CONDTEST_UNCOND 2
+-
+-#define       ARM_OPCODE_CONDITION_UNCOND     0xf
+-
+-static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
+-{
+-      u32 cc_bits  = opcode >> 28;
+-
+-      if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+-              if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
+-                      return ARM_OPCODE_CONDTEST_PASS;
+-              else
+-                      return ARM_OPCODE_CONDTEST_FAIL;
+-      }
+-      return ARM_OPCODE_CONDTEST_UNCOND;
+-}
+-
+ /*
+  * swp_handler logs the id of calling process, dissects the instruction, sanity
+  * checks the memory location, calls emulate_swpX for the actual operation and
diff --git a/queue-5.10/arm64-armv8_deprecated-move-emulation-functions.patch b/queue-5.10/arm64-armv8_deprecated-move-emulation-functions.patch
new file mode 100644 (file)
index 0000000..1446b68
--- /dev/null
@@ -0,0 +1,445 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:40 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:42 +0000
+Subject: arm64: armv8_deprecated move emulation functions
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-13-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 25eeac0cfe7c97ade1be07340e11e7143aab57a6 upstream.
+
+Subsequent patches will rework the logic in armv8_deprecated.c.
+
+In preparation for subsequent changes, this patch moves the emulation
+logic earlier in the file, and moves the infrastructure later in the
+file. This will make subsequent diffs simpler and easier to read.
+
+This is purely a move. There should be no functional change as a result
+of this patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Joey Gouly <joey.gouly@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20221019144123.612388-8-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/armv8_deprecated.c |  394 +++++++++++++++++------------------
+ 1 file changed, 197 insertions(+), 197 deletions(-)
+
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -52,203 +52,6 @@ struct insn_emulation {
+       int max;
+ };
+-static LIST_HEAD(insn_emulation);
+-static int nr_insn_emulated __initdata;
+-static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
+-static DEFINE_MUTEX(insn_emulation_mutex);
+-
+-static void register_emulation_hooks(struct insn_emulation *insn)
+-{
+-      struct undef_hook *hook;
+-
+-      BUG_ON(!insn->hooks);
+-
+-      for (hook = insn->hooks; hook->instr_mask; hook++)
+-              register_undef_hook(hook);
+-
+-      pr_notice("Registered %s emulation handler\n", insn->name);
+-}
+-
+-static void remove_emulation_hooks(struct insn_emulation *insn)
+-{
+-      struct undef_hook *hook;
+-
+-      BUG_ON(!insn->hooks);
+-
+-      for (hook = insn->hooks; hook->instr_mask; hook++)
+-              unregister_undef_hook(hook);
+-
+-      pr_notice("Removed %s emulation handler\n", insn->name);
+-}
+-
+-static void enable_insn_hw_mode(void *data)
+-{
+-      struct insn_emulation *insn = (struct insn_emulation *)data;
+-      if (insn->set_hw_mode)
+-              insn->set_hw_mode(true);
+-}
+-
+-static void disable_insn_hw_mode(void *data)
+-{
+-      struct insn_emulation *insn = (struct insn_emulation *)data;
+-      if (insn->set_hw_mode)
+-              insn->set_hw_mode(false);
+-}
+-
+-/* Run set_hw_mode(mode) on all active CPUs */
+-static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
+-{
+-      if (!insn->set_hw_mode)
+-              return -EINVAL;
+-      if (enable)
+-              on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
+-      else
+-              on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
+-      return 0;
+-}
+-
+-/*
+- * Run set_hw_mode for all insns on a starting CPU.
+- * Returns:
+- *  0                 - If all the hooks ran successfully.
+- * -EINVAL    - At least one hook is not supported by the CPU.
+- */
+-static int run_all_insn_set_hw_mode(unsigned int cpu)
+-{
+-      int rc = 0;
+-      unsigned long flags;
+-      struct insn_emulation *insn;
+-
+-      raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+-      list_for_each_entry(insn, &insn_emulation, node) {
+-              bool enable = (insn->current_mode == INSN_HW);
+-              if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
+-                      pr_warn("CPU[%u] cannot support the emulation of %s",
+-                              cpu, insn->name);
+-                      rc = -EINVAL;
+-              }
+-      }
+-      raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+-      return rc;
+-}
+-
+-static int update_insn_emulation_mode(struct insn_emulation *insn,
+-                                     enum insn_emulation_mode prev)
+-{
+-      int ret = 0;
+-
+-      switch (prev) {
+-      case INSN_UNDEF: /* Nothing to be done */
+-              break;
+-      case INSN_EMULATE:
+-              remove_emulation_hooks(insn);
+-              break;
+-      case INSN_HW:
+-              if (!run_all_cpu_set_hw_mode(insn, false))
+-                      pr_notice("Disabled %s support\n", insn->name);
+-              break;
+-      }
+-
+-      switch (insn->current_mode) {
+-      case INSN_UNDEF:
+-              break;
+-      case INSN_EMULATE:
+-              register_emulation_hooks(insn);
+-              break;
+-      case INSN_HW:
+-              ret = run_all_cpu_set_hw_mode(insn, true);
+-              if (!ret)
+-                      pr_notice("Enabled %s support\n", insn->name);
+-              break;
+-      }
+-
+-      return ret;
+-}
+-
+-static void __init register_insn_emulation(struct insn_emulation *insn)
+-{
+-      unsigned long flags;
+-
+-      insn->min = INSN_UNDEF;
+-
+-      switch (insn->status) {
+-      case INSN_DEPRECATED:
+-              insn->current_mode = INSN_EMULATE;
+-              /* Disable the HW mode if it was turned on at early boot time */
+-              run_all_cpu_set_hw_mode(insn, false);
+-              insn->max = INSN_HW;
+-              break;
+-      case INSN_OBSOLETE:
+-              insn->current_mode = INSN_UNDEF;
+-              insn->max = INSN_EMULATE;
+-              break;
+-      }
+-
+-      raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+-      list_add(&insn->node, &insn_emulation);
+-      nr_insn_emulated++;
+-      raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+-
+-      /* Register any handlers if required */
+-      update_insn_emulation_mode(insn, INSN_UNDEF);
+-}
+-
+-static int emulation_proc_handler(struct ctl_table *table, int write,
+-                                void *buffer, size_t *lenp,
+-                                loff_t *ppos)
+-{
+-      int ret = 0;
+-      struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode);
+-      enum insn_emulation_mode prev_mode = insn->current_mode;
+-
+-      mutex_lock(&insn_emulation_mutex);
+-      ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+-
+-      if (ret || !write || prev_mode == insn->current_mode)
+-              goto ret;
+-
+-      ret = update_insn_emulation_mode(insn, prev_mode);
+-      if (ret) {
+-              /* Mode change failed, revert to previous mode. */
+-              insn->current_mode = prev_mode;
+-              update_insn_emulation_mode(insn, INSN_UNDEF);
+-      }
+-ret:
+-      mutex_unlock(&insn_emulation_mutex);
+-      return ret;
+-}
+-
+-static void __init register_insn_emulation_sysctl(void)
+-{
+-      unsigned long flags;
+-      int i = 0;
+-      struct insn_emulation *insn;
+-      struct ctl_table *insns_sysctl, *sysctl;
+-
+-      insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
+-                             GFP_KERNEL);
+-      if (!insns_sysctl)
+-              return;
+-
+-      raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+-      list_for_each_entry(insn, &insn_emulation, node) {
+-              sysctl = &insns_sysctl[i];
+-
+-              sysctl->mode = 0644;
+-              sysctl->maxlen = sizeof(int);
+-
+-              sysctl->procname = insn->name;
+-              sysctl->data = &insn->current_mode;
+-              sysctl->extra1 = &insn->min;
+-              sysctl->extra2 = &insn->max;
+-              sysctl->proc_handler = emulation_proc_handler;
+-              i++;
+-      }
+-      raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+-
+-      register_sysctl("abi", insns_sysctl);
+-}
+-
+ /*
+  *  Implement emulation of the SWP/SWPB instructions using load-exclusive and
+  *  store-exclusive.
+@@ -608,6 +411,203 @@ static struct insn_emulation insn_setend
+       .set_hw_mode = setend_set_hw_mode,
+ };
++static LIST_HEAD(insn_emulation);
++static int nr_insn_emulated __initdata;
++static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
++static DEFINE_MUTEX(insn_emulation_mutex);
++
++static void register_emulation_hooks(struct insn_emulation *insn)
++{
++      struct undef_hook *hook;
++
++      BUG_ON(!insn->hooks);
++
++      for (hook = insn->hooks; hook->instr_mask; hook++)
++              register_undef_hook(hook);
++
++      pr_notice("Registered %s emulation handler\n", insn->name);
++}
++
++static void remove_emulation_hooks(struct insn_emulation *insn)
++{
++      struct undef_hook *hook;
++
++      BUG_ON(!insn->hooks);
++
++      for (hook = insn->hooks; hook->instr_mask; hook++)
++              unregister_undef_hook(hook);
++
++      pr_notice("Removed %s emulation handler\n", insn->name);
++}
++
++static void enable_insn_hw_mode(void *data)
++{
++      struct insn_emulation *insn = (struct insn_emulation *)data;
++      if (insn->set_hw_mode)
++              insn->set_hw_mode(true);
++}
++
++static void disable_insn_hw_mode(void *data)
++{
++      struct insn_emulation *insn = (struct insn_emulation *)data;
++      if (insn->set_hw_mode)
++              insn->set_hw_mode(false);
++}
++
++/* Run set_hw_mode(mode) on all active CPUs */
++static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
++{
++      if (!insn->set_hw_mode)
++              return -EINVAL;
++      if (enable)
++              on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
++      else
++              on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
++      return 0;
++}
++
++/*
++ * Run set_hw_mode for all insns on a starting CPU.
++ * Returns:
++ *  0                 - If all the hooks ran successfully.
++ * -EINVAL    - At least one hook is not supported by the CPU.
++ */
++static int run_all_insn_set_hw_mode(unsigned int cpu)
++{
++      int rc = 0;
++      unsigned long flags;
++      struct insn_emulation *insn;
++
++      raw_spin_lock_irqsave(&insn_emulation_lock, flags);
++      list_for_each_entry(insn, &insn_emulation, node) {
++              bool enable = (insn->current_mode == INSN_HW);
++              if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
++                      pr_warn("CPU[%u] cannot support the emulation of %s",
++                              cpu, insn->name);
++                      rc = -EINVAL;
++              }
++      }
++      raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
++      return rc;
++}
++
++static int update_insn_emulation_mode(struct insn_emulation *insn,
++                                     enum insn_emulation_mode prev)
++{
++      int ret = 0;
++
++      switch (prev) {
++      case INSN_UNDEF: /* Nothing to be done */
++              break;
++      case INSN_EMULATE:
++              remove_emulation_hooks(insn);
++              break;
++      case INSN_HW:
++              if (!run_all_cpu_set_hw_mode(insn, false))
++                      pr_notice("Disabled %s support\n", insn->name);
++              break;
++      }
++
++      switch (insn->current_mode) {
++      case INSN_UNDEF:
++              break;
++      case INSN_EMULATE:
++              register_emulation_hooks(insn);
++              break;
++      case INSN_HW:
++              ret = run_all_cpu_set_hw_mode(insn, true);
++              if (!ret)
++                      pr_notice("Enabled %s support\n", insn->name);
++              break;
++      }
++
++      return ret;
++}
++
++static void __init register_insn_emulation(struct insn_emulation *insn)
++{
++      unsigned long flags;
++
++      insn->min = INSN_UNDEF;
++
++      switch (insn->status) {
++      case INSN_DEPRECATED:
++              insn->current_mode = INSN_EMULATE;
++              /* Disable the HW mode if it was turned on at early boot time */
++              run_all_cpu_set_hw_mode(insn, false);
++              insn->max = INSN_HW;
++              break;
++      case INSN_OBSOLETE:
++              insn->current_mode = INSN_UNDEF;
++              insn->max = INSN_EMULATE;
++              break;
++      }
++
++      raw_spin_lock_irqsave(&insn_emulation_lock, flags);
++      list_add(&insn->node, &insn_emulation);
++      nr_insn_emulated++;
++      raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
++
++      /* Register any handlers if required */
++      update_insn_emulation_mode(insn, INSN_UNDEF);
++}
++
++static int emulation_proc_handler(struct ctl_table *table, int write,
++                                void *buffer, size_t *lenp,
++                                loff_t *ppos)
++{
++      int ret = 0;
++      struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode);
++      enum insn_emulation_mode prev_mode = insn->current_mode;
++
++      mutex_lock(&insn_emulation_mutex);
++      ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
++
++      if (ret || !write || prev_mode == insn->current_mode)
++              goto ret;
++
++      ret = update_insn_emulation_mode(insn, prev_mode);
++      if (ret) {
++              /* Mode change failed, revert to previous mode. */
++              insn->current_mode = prev_mode;
++              update_insn_emulation_mode(insn, INSN_UNDEF);
++      }
++ret:
++      mutex_unlock(&insn_emulation_mutex);
++      return ret;
++}
++
++static void __init register_insn_emulation_sysctl(void)
++{
++      unsigned long flags;
++      int i = 0;
++      struct insn_emulation *insn;
++      struct ctl_table *insns_sysctl, *sysctl;
++
++      insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
++                             GFP_KERNEL);
++      if (!insns_sysctl)
++              return;
++
++      raw_spin_lock_irqsave(&insn_emulation_lock, flags);
++      list_for_each_entry(insn, &insn_emulation, node) {
++              sysctl = &insns_sysctl[i];
++
++              sysctl->mode = 0644;
++              sysctl->maxlen = sizeof(int);
++
++              sysctl->procname = insn->name;
++              sysctl->data = &insn->current_mode;
++              sysctl->extra1 = &insn->min;
++              sysctl->extra2 = &insn->max;
++              sysctl->proc_handler = emulation_proc_handler;
++              i++;
++      }
++      raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
++
++      register_sysctl("abi", insns_sysctl);
++}
++
+ /*
+  * Invoked as core_initcall, which guarantees that the instruction
+  * emulation is ready for userspace.
diff --git a/queue-5.10/arm64-armv8_deprecated-rework-deprected-instruction-handling.patch b/queue-5.10/arm64-armv8_deprecated-rework-deprected-instruction-handling.patch
new file mode 100644 (file)
index 0000000..b52b76e
--- /dev/null
@@ -0,0 +1,582 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:42 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:44 +0000
+Subject: arm64: armv8_deprecated: rework deprected instruction handling
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-15-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 124c49b1b5d947b7180c5d6cbb09ddf76ea45ea2 upstream.
+
+Support for deprecated instructions can be enabled or disabled at
+runtime. To handle this, the code in armv8_deprecated.c registers and
+unregisters undef_hooks, and makes cross CPU calls to configure HW
+support. This is rather complicated, and the synchronization required to
+make this safe ends up serializing the handling of instructions which
+have been trapped.
+
+This patch simplifies the deprecated instruction handling by removing
+the dynamic registration and unregistration, and changing the trap
+handling code to determine whether a handler should be invoked. This
+removes the need for dynamic list management, and simplifies the locking
+requirements, making it possible to handle trapped instructions entirely
+in parallel.
+
+Where changing the emulation state requires a cross-call, this is
+serialized by locally disabling interrupts, ensuring that the CPU is not
+left in an inconsistent state.
+
+To simplify sysctl management, each insn_emulation is given a separate
+sysctl table, permitting these to be registered separately. The core
+sysctl code will iterate over all of these when walking sysfs.
+
+I've tested this with userspace programs which use each of the
+deprecated instructions, and I've concurrently modified the support
+level for each of the features back-and-forth between HW and emulated to
+check that there are no spurious SIGILLs sent to userspace when the
+support level is changed.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Joey Gouly <joey.gouly@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20221019144123.612388-10-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/traps.h       |   19 +-
+ arch/arm64/kernel/armv8_deprecated.c |  291 +++++++++++++++++------------------
+ arch/arm64/kernel/traps.c            |   40 ----
+ 3 files changed, 156 insertions(+), 194 deletions(-)
+
+--- a/arch/arm64/include/asm/traps.h
++++ b/arch/arm64/include/asm/traps.h
+@@ -13,17 +13,16 @@
+ struct pt_regs;
+-struct undef_hook {
+-      struct list_head node;
+-      u32 instr_mask;
+-      u32 instr_val;
+-      u64 pstate_mask;
+-      u64 pstate_val;
+-      int (*fn)(struct pt_regs *regs, u32 instr);
+-};
++#ifdef CONFIG_ARMV8_DEPRECATED
++bool try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn);
++#else
++static inline bool
++try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn)
++{
++      return false;
++}
++#endif /* CONFIG_ARMV8_DEPRECATED */
+-void register_undef_hook(struct undef_hook *hook);
+-void unregister_undef_hook(struct undef_hook *hook);
+ void force_signal_inject(int signal, int code, unsigned long address, unsigned int err);
+ void arm64_notify_segfault(unsigned long addr);
+ void arm64_force_sig_fault(int signo, int code, void __user *addr, const char *str);
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -38,17 +38,24 @@ enum insn_emulation_mode {
+ enum legacy_insn_status {
+       INSN_DEPRECATED,
+       INSN_OBSOLETE,
++      INSN_UNAVAILABLE,
+ };
+ struct insn_emulation {
+       const char                      *name;
+-      struct list_head                node;
+       enum legacy_insn_status         status;
+-      struct undef_hook               *hooks;
++      bool                            (*try_emulate)(struct pt_regs *regs,
++                                                     u32 insn);
+       int                             (*set_hw_mode)(bool enable);
++
+       int current_mode;
+       int min;
+       int max;
++
++      /*
++       * sysctl for this emulation + a sentinal entry.
++       */
++      struct ctl_table sysctl[2];
+ };
+ #define ARM_OPCODE_CONDTEST_FAIL   0
+@@ -70,6 +77,7 @@ static unsigned int aarch32_check_condit
+       return ARM_OPCODE_CONDTEST_UNCOND;
+ }
++#ifdef CONFIG_SWP_EMULATION
+ /*
+  *  Implement emulation of the SWP/SWPB instructions using load-exclusive and
+  *  store-exclusive.
+@@ -228,28 +236,27 @@ fault:
+       return 0;
+ }
+-/*
+- * Only emulate SWP/SWPB executed in ARM state/User mode.
+- * The kernel must be SWP free and SWP{B} does not exist in Thumb.
+- */
+-static struct undef_hook swp_hooks[] = {
+-      {
+-              .instr_mask     = 0x0fb00ff0,
+-              .instr_val      = 0x01000090,
+-              .pstate_mask    = PSR_AA32_MODE_MASK,
+-              .pstate_val     = PSR_AA32_MODE_USR,
+-              .fn             = swp_handler
+-      },
+-      { }
+-};
++static bool try_emulate_swp(struct pt_regs *regs, u32 insn)
++{
++      /* SWP{B} only exists in ARM state and does not exist in Thumb */
++      if (!compat_user_mode(regs) || compat_thumb_mode(regs))
++              return false;
++
++      if ((insn & 0x0fb00ff0) != 0x01000090)
++              return false;
++
++      return swp_handler(regs, insn) == 0;
++}
+ static struct insn_emulation insn_swp = {
+       .name = "swp",
+       .status = INSN_OBSOLETE,
+-      .hooks = swp_hooks,
++      .try_emulate = try_emulate_swp,
+       .set_hw_mode = NULL,
+ };
++#endif /* CONFIG_SWP_EMULATION */
++#ifdef CONFIG_CP15_BARRIER_EMULATION
+ static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
+ {
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+@@ -312,31 +319,29 @@ static int cp15_barrier_set_hw_mode(bool
+       return 0;
+ }
+-static struct undef_hook cp15_barrier_hooks[] = {
+-      {
+-              .instr_mask     = 0x0fff0fdf,
+-              .instr_val      = 0x0e070f9a,
+-              .pstate_mask    = PSR_AA32_MODE_MASK,
+-              .pstate_val     = PSR_AA32_MODE_USR,
+-              .fn             = cp15barrier_handler,
+-      },
+-      {
+-              .instr_mask     = 0x0fff0fff,
+-              .instr_val      = 0x0e070f95,
+-              .pstate_mask    = PSR_AA32_MODE_MASK,
+-              .pstate_val     = PSR_AA32_MODE_USR,
+-              .fn             = cp15barrier_handler,
+-      },
+-      { }
+-};
++static bool try_emulate_cp15_barrier(struct pt_regs *regs, u32 insn)
++{
++      if (!compat_user_mode(regs) || compat_thumb_mode(regs))
++              return false;
++
++      if ((insn & 0x0fff0fdf) == 0x0e070f9a)
++              return cp15barrier_handler(regs, insn) == 0;
++
++      if ((insn & 0x0fff0fff) == 0x0e070f95)
++              return cp15barrier_handler(regs, insn) == 0;
++
++      return false;
++}
+ static struct insn_emulation insn_cp15_barrier = {
+       .name = "cp15_barrier",
+       .status = INSN_DEPRECATED,
+-      .hooks = cp15_barrier_hooks,
++      .try_emulate = try_emulate_cp15_barrier,
+       .set_hw_mode = cp15_barrier_set_hw_mode,
+ };
++#endif /* CONFIG_CP15_BARRIER_EMULATION */
++#ifdef CONFIG_SETEND_EMULATION
+ static int setend_set_hw_mode(bool enable)
+ {
+       if (!cpu_supports_mixed_endian_el0())
+@@ -384,60 +389,40 @@ static int t16_setend_handler(struct pt_
+       return rc;
+ }
+-static struct undef_hook setend_hooks[] = {
+-      {
+-              .instr_mask     = 0xfffffdff,
+-              .instr_val      = 0xf1010000,
+-              .pstate_mask    = PSR_AA32_MODE_MASK,
+-              .pstate_val     = PSR_AA32_MODE_USR,
+-              .fn             = a32_setend_handler,
+-      },
+-      {
+-              /* Thumb mode */
+-              .instr_mask     = 0xfffffff7,
+-              .instr_val      = 0x0000b650,
+-              .pstate_mask    = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
+-              .pstate_val     = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
+-              .fn             = t16_setend_handler,
+-      },
+-      {}
+-};
++static bool try_emulate_setend(struct pt_regs *regs, u32 insn)
++{
++      if (compat_thumb_mode(regs) &&
++          (insn & 0xfffffff7) == 0x0000b650)
++              return t16_setend_handler(regs, insn) == 0;
++
++      if (compat_user_mode(regs) &&
++          (insn & 0xfffffdff) == 0xf1010000)
++              return a32_setend_handler(regs, insn) == 0;
++
++      return false;
++}
+ static struct insn_emulation insn_setend = {
+       .name = "setend",
+       .status = INSN_DEPRECATED,
+-      .hooks = setend_hooks,
++      .try_emulate = try_emulate_setend,
+       .set_hw_mode = setend_set_hw_mode,
+ };
++#endif /* CONFIG_SETEND_EMULATION */
+-static LIST_HEAD(insn_emulation);
+-static int nr_insn_emulated __initdata;
+-static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
+-static DEFINE_MUTEX(insn_emulation_mutex);
+-
+-static void register_emulation_hooks(struct insn_emulation *insn)
+-{
+-      struct undef_hook *hook;
+-
+-      BUG_ON(!insn->hooks);
+-
+-      for (hook = insn->hooks; hook->instr_mask; hook++)
+-              register_undef_hook(hook);
+-
+-      pr_notice("Registered %s emulation handler\n", insn->name);
+-}
+-
+-static void remove_emulation_hooks(struct insn_emulation *insn)
+-{
+-      struct undef_hook *hook;
+-
+-      BUG_ON(!insn->hooks);
+-
+-      for (hook = insn->hooks; hook->instr_mask; hook++)
+-              unregister_undef_hook(hook);
++static struct insn_emulation *insn_emulations[] = {
++#ifdef CONFIG_SWP_EMULATION
++      &insn_swp,
++#endif
++#ifdef CONFIG_CP15_BARRIER_EMULATION
++      &insn_cp15_barrier,
++#endif
++#ifdef CONFIG_SETEND_EMULATION
++      &insn_setend,
++#endif
++};
+-      pr_notice("Removed %s emulation handler\n", insn->name);
+-}
++static DEFINE_MUTEX(insn_emulation_mutex);
+ static void enable_insn_hw_mode(void *data)
+ {
+@@ -473,20 +458,27 @@ static int run_all_cpu_set_hw_mode(struc
+  */
+ static int run_all_insn_set_hw_mode(unsigned int cpu)
+ {
++      int i;
+       int rc = 0;
+       unsigned long flags;
+-      struct insn_emulation *insn;
+-      raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+-      list_for_each_entry(insn, &insn_emulation, node) {
+-              bool enable = (insn->current_mode == INSN_HW);
++      /*
++       * Disable IRQs to serialize against an IPI from
++       * run_all_cpu_set_hw_mode(), ensuring the HW is programmed to the most
++       * recent enablement state if the two race with one another.
++       */
++      local_irq_save(flags);
++      for (i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
++              struct insn_emulation *insn = insn_emulations[i];
++              bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
+               if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
+                       pr_warn("CPU[%u] cannot support the emulation of %s",
+                               cpu, insn->name);
+                       rc = -EINVAL;
+               }
+       }
+-      raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
++      local_irq_restore(flags);
++
+       return rc;
+ }
+@@ -499,7 +491,6 @@ static int update_insn_emulation_mode(st
+       case INSN_UNDEF: /* Nothing to be done */
+               break;
+       case INSN_EMULATE:
+-              remove_emulation_hooks(insn);
+               break;
+       case INSN_HW:
+               if (!run_all_cpu_set_hw_mode(insn, false))
+@@ -511,7 +502,6 @@ static int update_insn_emulation_mode(st
+       case INSN_UNDEF:
+               break;
+       case INSN_EMULATE:
+-              register_emulation_hooks(insn);
+               break;
+       case INSN_HW:
+               ret = run_all_cpu_set_hw_mode(insn, true);
+@@ -523,34 +513,6 @@ static int update_insn_emulation_mode(st
+       return ret;
+ }
+-static void __init register_insn_emulation(struct insn_emulation *insn)
+-{
+-      unsigned long flags;
+-
+-      insn->min = INSN_UNDEF;
+-
+-      switch (insn->status) {
+-      case INSN_DEPRECATED:
+-              insn->current_mode = INSN_EMULATE;
+-              /* Disable the HW mode if it was turned on at early boot time */
+-              run_all_cpu_set_hw_mode(insn, false);
+-              insn->max = INSN_HW;
+-              break;
+-      case INSN_OBSOLETE:
+-              insn->current_mode = INSN_UNDEF;
+-              insn->max = INSN_EMULATE;
+-              break;
+-      }
+-
+-      raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+-      list_add(&insn->node, &insn_emulation);
+-      nr_insn_emulated++;
+-      raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+-
+-      /* Register any handlers if required */
+-      update_insn_emulation_mode(insn, INSN_UNDEF);
+-}
+-
+ static int emulation_proc_handler(struct ctl_table *table, int write,
+                                 void *buffer, size_t *lenp,
+                                 loff_t *ppos)
+@@ -568,7 +530,7 @@ static int emulation_proc_handler(struct
+       ret = update_insn_emulation_mode(insn, prev_mode);
+       if (ret) {
+               /* Mode change failed, revert to previous mode. */
+-              insn->current_mode = prev_mode;
++              WRITE_ONCE(insn->current_mode, prev_mode);
+               update_insn_emulation_mode(insn, INSN_UNDEF);
+       }
+ ret:
+@@ -576,21 +538,34 @@ ret:
+       return ret;
+ }
+-static void __init register_insn_emulation_sysctl(void)
++static void __init register_insn_emulation(struct insn_emulation *insn)
+ {
+-      unsigned long flags;
+-      int i = 0;
+-      struct insn_emulation *insn;
+-      struct ctl_table *insns_sysctl, *sysctl;
+-
+-      insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
+-                             GFP_KERNEL);
+-      if (!insns_sysctl)
+-              return;
+-
+-      raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+-      list_for_each_entry(insn, &insn_emulation, node) {
+-              sysctl = &insns_sysctl[i];
++      struct ctl_table *sysctl;
++
++      insn->min = INSN_UNDEF;
++
++      switch (insn->status) {
++      case INSN_DEPRECATED:
++              insn->current_mode = INSN_EMULATE;
++              /* Disable the HW mode if it was turned on at early boot time */
++              run_all_cpu_set_hw_mode(insn, false);
++              insn->max = INSN_HW;
++              break;
++      case INSN_OBSOLETE:
++              insn->current_mode = INSN_UNDEF;
++              insn->max = INSN_EMULATE;
++              break;
++      case INSN_UNAVAILABLE:
++              insn->current_mode = INSN_UNDEF;
++              insn->max = INSN_UNDEF;
++              break;
++      }
++
++      /* Program the HW if required */
++      update_insn_emulation_mode(insn, INSN_UNDEF);
++
++      if (insn->status != INSN_UNAVAILABLE) {
++              sysctl = &insn->sysctl[0];
+               sysctl->mode = 0644;
+               sysctl->maxlen = sizeof(int);
+@@ -600,11 +575,34 @@ static void __init register_insn_emulati
+               sysctl->extra1 = &insn->min;
+               sysctl->extra2 = &insn->max;
+               sysctl->proc_handler = emulation_proc_handler;
+-              i++;
++
++              register_sysctl("abi", sysctl);
++      }
++}
++
++bool try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn)
++{
++      int i;
++
++      for (i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
++              struct insn_emulation *ie = insn_emulations[i];
++
++              if (ie->status == INSN_UNAVAILABLE)
++                      continue;
++
++              /*
++               * A trap may race with the mode being changed
++               * INSN_EMULATE<->INSN_HW. Try to emulate the instruction to
++               * avoid a spurious UNDEF.
++               */
++              if (READ_ONCE(ie->current_mode) == INSN_UNDEF)
++                      continue;
++
++              if (ie->try_emulate(regs, insn))
++                      return true;
+       }
+-      raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+-      register_sysctl("abi", insns_sysctl);
++      return false;
+ }
+ /*
+@@ -613,24 +611,27 @@ static void __init register_insn_emulati
+  */
+ static int __init armv8_deprecated_init(void)
+ {
+-      if (IS_ENABLED(CONFIG_SWP_EMULATION))
+-              register_insn_emulation(&insn_swp);
++      int i;
+-      if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
+-              register_insn_emulation(&insn_cp15_barrier);
++#ifdef CONFIG_SETEND_EMULATION
++      if (!system_supports_mixed_endian_el0()) {
++              insn_setend.status = INSN_UNAVAILABLE;
++              pr_info("setend instruction emulation is not supported on this system\n");
++      }
+-      if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
+-              if (system_supports_mixed_endian_el0())
+-                      register_insn_emulation(&insn_setend);
+-              else
+-                      pr_info("setend instruction emulation is not supported on this system\n");
++#endif
++      for (i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
++              struct insn_emulation *ie = insn_emulations[i];
++
++              if (ie->status == INSN_UNAVAILABLE)
++                      continue;
++
++              register_insn_emulation(ie);
+       }
+       cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
+                                 "arm64/isndep:starting",
+                                 run_all_insn_set_hw_mode, NULL);
+-      register_insn_emulation_sysctl();
+-
+       return 0;
+ }
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -282,27 +282,6 @@ void arm64_skip_faulting_instruction(str
+               regs->pstate &= ~PSR_BTYPE_MASK;
+ }
+-static LIST_HEAD(undef_hook);
+-static DEFINE_RAW_SPINLOCK(undef_lock);
+-
+-void register_undef_hook(struct undef_hook *hook)
+-{
+-      unsigned long flags;
+-
+-      raw_spin_lock_irqsave(&undef_lock, flags);
+-      list_add(&hook->node, &undef_hook);
+-      raw_spin_unlock_irqrestore(&undef_lock, flags);
+-}
+-
+-void unregister_undef_hook(struct undef_hook *hook)
+-{
+-      unsigned long flags;
+-
+-      raw_spin_lock_irqsave(&undef_lock, flags);
+-      list_del(&hook->node);
+-      raw_spin_unlock_irqrestore(&undef_lock, flags);
+-}
+-
+ static int user_insn_read(struct pt_regs *regs, u32 *insnp)
+ {
+       u32 instr;
+@@ -334,23 +313,6 @@ static int user_insn_read(struct pt_regs
+       return 0;
+ }
+-static int call_undef_hook(struct pt_regs *regs, u32 instr)
+-{
+-      struct undef_hook *hook;
+-      unsigned long flags;
+-      int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
+-
+-      raw_spin_lock_irqsave(&undef_lock, flags);
+-      list_for_each_entry(hook, &undef_hook, node)
+-              if ((instr & hook->instr_mask) == hook->instr_val &&
+-                      (regs->pstate & hook->pstate_mask) == hook->pstate_val)
+-                      fn = hook->fn;
+-
+-      raw_spin_unlock_irqrestore(&undef_lock, flags);
+-
+-      return fn ? fn(regs, instr) : 1;
+-}
+-
+ void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
+ {
+       const char *desc;
+@@ -411,7 +373,7 @@ void do_el0_undef(struct pt_regs *regs,
+       if (try_emulate_mrs(regs, insn))
+               return;
+-      if (call_undef_hook(regs, insn) == 0)
++      if (try_emulate_armv8_deprecated(regs, insn))
+               return;
+ out_err:
diff --git a/queue-5.10/arm64-consistently-pass-esr_elx-to-die.patch b/queue-5.10/arm64-consistently-pass-esr_elx-to-die.patch
new file mode 100644 (file)
index 0000000..e0eec72
--- /dev/null
@@ -0,0 +1,174 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:30 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:33 +0000
+Subject: arm64: consistently pass ESR_ELx to die()
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-4-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 0f2cb928a1547ae8f89e80a4b8df2c6c02ae5f96 upstream.
+
+Currently, bug_handler() and kasan_handler() call die() with '0' as the
+'err' value, whereas die_kernel_fault() passes the ESR_ELx value.
+
+For consistency, this patch ensures we always pass the ESR_ELx value to
+die(). As this is only called for exceptions taken from kernel mode,
+there should be no user-visible change as a result of this patch.
+
+For UNDEFINED exceptions, I've had to modify do_undefinstr() and its
+callers to pass the ESR_ELx value. In all cases the ESR_ELx value had
+already been read and was available.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Alexandru Elisei <alexandru.elisei@arm.com>
+Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Link: https://lore.kernel.org/r/20220913101732.3925290-4-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/exception.h |    2 +-
+ arch/arm64/kernel/entry-common.c   |   14 +++++++-------
+ arch/arm64/kernel/traps.c          |   14 +++++++-------
+ 3 files changed, 15 insertions(+), 15 deletions(-)
+
+--- a/arch/arm64/include/asm/exception.h
++++ b/arch/arm64/include/asm/exception.h
+@@ -33,7 +33,7 @@ asmlinkage void exit_to_user_mode(void);
+ void arm64_enter_nmi(struct pt_regs *regs);
+ void arm64_exit_nmi(struct pt_regs *regs);
+ void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+-void do_undefinstr(struct pt_regs *regs);
++void do_undefinstr(struct pt_regs *regs, unsigned long esr);
+ void do_bti(struct pt_regs *regs);
+ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
+ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+--- a/arch/arm64/kernel/entry-common.c
++++ b/arch/arm64/kernel/entry-common.c
+@@ -132,11 +132,11 @@ static void noinstr el1_pc(struct pt_reg
+       exit_to_kernel_mode(regs);
+ }
+-static void noinstr el1_undef(struct pt_regs *regs)
++static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
+ {
+       enter_from_kernel_mode(regs);
+       local_daif_inherit(regs);
+-      do_undefinstr(regs);
++      do_undefinstr(regs, esr);
+       local_daif_mask();
+       exit_to_kernel_mode(regs);
+ }
+@@ -210,7 +210,7 @@ asmlinkage void noinstr el1_sync_handler
+               break;
+       case ESR_ELx_EC_SYS64:
+       case ESR_ELx_EC_UNKNOWN:
+-              el1_undef(regs);
++              el1_undef(regs, esr);
+               break;
+       case ESR_ELx_EC_BREAKPT_CUR:
+       case ESR_ELx_EC_SOFTSTP_CUR:
+@@ -316,11 +316,11 @@ static void noinstr el0_sp(struct pt_reg
+       do_sp_pc_abort(regs->sp, esr, regs);
+ }
+-static void noinstr el0_undef(struct pt_regs *regs)
++static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
+ {
+       enter_from_user_mode();
+       local_daif_restore(DAIF_PROCCTX);
+-      do_undefinstr(regs);
++      do_undefinstr(regs, esr);
+ }
+ static void noinstr el0_bti(struct pt_regs *regs)
+@@ -394,7 +394,7 @@ asmlinkage void noinstr el0_sync_handler
+               el0_pc(regs, esr);
+               break;
+       case ESR_ELx_EC_UNKNOWN:
+-              el0_undef(regs);
++              el0_undef(regs, esr);
+               break;
+       case ESR_ELx_EC_BTI:
+               el0_bti(regs);
+@@ -454,7 +454,7 @@ asmlinkage void noinstr el0_sync_compat_
+       case ESR_ELx_EC_CP14_MR:
+       case ESR_ELx_EC_CP14_LS:
+       case ESR_ELx_EC_CP14_64:
+-              el0_undef(regs);
++              el0_undef(regs, esr);
+               break;
+       case ESR_ELx_EC_CP15_32:
+       case ESR_ELx_EC_CP15_64:
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -395,7 +395,7 @@ void arm64_notify_segfault(unsigned long
+       force_signal_inject(SIGSEGV, code, addr, 0);
+ }
+-void do_undefinstr(struct pt_regs *regs)
++void do_undefinstr(struct pt_regs *regs, unsigned long esr)
+ {
+       /* check for AArch32 breakpoint instructions */
+       if (!aarch32_break_handler(regs))
+@@ -405,7 +405,7 @@ void do_undefinstr(struct pt_regs *regs)
+               return;
+       if (!user_mode(regs))
+-              die("Oops - Undefined instruction", regs, 0);
++              die("Oops - Undefined instruction", regs, esr);
+       force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ }
+@@ -663,7 +663,7 @@ void do_cp15instr(unsigned int esr, stru
+               hook_base = cp15_64_hooks;
+               break;
+       default:
+-              do_undefinstr(regs);
++              do_undefinstr(regs, esr);
+               return;
+       }
+@@ -678,7 +678,7 @@ void do_cp15instr(unsigned int esr, stru
+        * EL0. Fall back to our usual undefined instruction handler
+        * so that we handle these consistently.
+        */
+-      do_undefinstr(regs);
++      do_undefinstr(regs, esr);
+ }
+ NOKPROBE_SYMBOL(do_cp15instr);
+ #endif
+@@ -698,7 +698,7 @@ void do_sysinstr(unsigned int esr, struc
+        * back to our usual undefined instruction handler so that we handle
+        * these consistently.
+        */
+-      do_undefinstr(regs);
++      do_undefinstr(regs, esr);
+ }
+ NOKPROBE_SYMBOL(do_sysinstr);
+@@ -901,7 +901,7 @@ static int bug_handler(struct pt_regs *r
+ {
+       switch (report_bug(regs->pc, regs)) {
+       case BUG_TRAP_TYPE_BUG:
+-              die("Oops - BUG", regs, 0);
++              die("Oops - BUG", regs, esr);
+               break;
+       case BUG_TRAP_TYPE_WARN:
+@@ -969,7 +969,7 @@ static int kasan_handler(struct pt_regs
+        * This is something that might be fixed at some point in the future.
+        */
+       if (!recover)
+-              die("Oops - KASAN", regs, 0);
++              die("Oops - KASAN", regs, esr);
+       /* If thread survives, skip over the brk instruction and continue: */
+       arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
diff --git a/queue-5.10/arm64-die-pass-err-as-long.patch b/queue-5.10/arm64-die-pass-err-as-long.patch
new file mode 100644 (file)
index 0000000..d63449a
--- /dev/null
@@ -0,0 +1,80 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:29 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:32 +0000
+Subject: arm64: die(): pass 'err' as long
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-3-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 18906ff9af6517c20763ed63dab602a4150794f7 upstream.
+
+Recently, we reworked a lot of code to consistently pass ESR_ELx as a
+64-bit quantity. However, we missed that this can be passed into die()
+and __die() as the 'err' parameter where it is truncated to a 32-bit
+int.
+
+As notify_die() already takes 'err' as a long, this patch changes die()
+and __die() to also take 'err' as a long, ensuring that the full value
+of ESR_ELx is retained.
+
+At the same time, die() is updated to consistently log 'err' as a
+zero-padded 64-bit quantity.
+
+Subsequent patches will pass the ESR_ELx value to die() for a number of
+exceptions.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Cc: Alexandru Elisei <alexandru.elisei@arm.com>
+Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20220913101732.3925290-3-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/system_misc.h |    2 +-
+ arch/arm64/kernel/traps.c            |    6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/system_misc.h
++++ b/arch/arm64/include/asm/system_misc.h
+@@ -18,7 +18,7 @@
+ struct pt_regs;
+-void die(const char *msg, struct pt_regs *regs, int err);
++void die(const char *msg, struct pt_regs *regs, long err);
+ struct siginfo;
+ void arm64_notify_die(const char *str, struct pt_regs *regs,
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -90,12 +90,12 @@ static void dump_kernel_instr(const char
+ #define S_SMP " SMP"
+-static int __die(const char *str, int err, struct pt_regs *regs)
++static int __die(const char *str, long err, struct pt_regs *regs)
+ {
+       static int die_counter;
+       int ret;
+-      pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
++      pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
+                str, err, ++die_counter);
+       /* trap and error numbers are mostly meaningless on ARM */
+@@ -116,7 +116,7 @@ static DEFINE_RAW_SPINLOCK(die_lock);
+ /*
+  * This function is protected against re-entrancy.
+  */
+-void die(const char *str, struct pt_regs *regs, int err)
++void die(const char *str, struct pt_regs *regs, long err)
+ {
+       int ret;
+       unsigned long flags;
diff --git a/queue-5.10/arm64-factor-insn-read-out-of-call_undef_hook.patch b/queue-5.10/arm64-factor-insn-read-out-of-call_undef_hook.patch
new file mode 100644 (file)
index 0000000..a8e4948
--- /dev/null
@@ -0,0 +1,118 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:37 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:39 +0000
+Subject: arm64: factor insn read out of call_undef_hook()
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-10-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit dbfbd87efa79575491af0ba1a87bf567eaea6cae upstream.
+
+Subsequent patches will rework EL0 UNDEF handling, removing the need for
+struct undef_hook and call_undef_hook. In preparation for those changes,
+this patch factors the logic for reading user instructions out of
+call_undef_hook() and into a new user_insn_read() helper, matching the
+style of the existing aarch64_insn_read() helper used for reading kernel
+instructions.
+
+There should be no functional change as a result of this patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Joey Gouly <joey.gouly@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20221019144123.612388-5-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/traps.c |   31 ++++++++++++++++++++++---------
+ 1 file changed, 22 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -303,25 +303,22 @@ void unregister_undef_hook(struct undef_
+       raw_spin_unlock_irqrestore(&undef_lock, flags);
+ }
+-static int call_undef_hook(struct pt_regs *regs)
++static int user_insn_read(struct pt_regs *regs, u32 *insnp)
+ {
+-      struct undef_hook *hook;
+-      unsigned long flags;
+       u32 instr;
+-      int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
+       void __user *pc = (void __user *)instruction_pointer(regs);
+       if (compat_thumb_mode(regs)) {
+               /* 16-bit Thumb instruction */
+               __le16 instr_le;
+               if (get_user(instr_le, (__le16 __user *)pc))
+-                      goto exit;
++                      return -EFAULT;
+               instr = le16_to_cpu(instr_le);
+               if (aarch32_insn_is_wide(instr)) {
+                       u32 instr2;
+                       if (get_user(instr_le, (__le16 __user *)(pc + 2)))
+-                              goto exit;
++                              return -EFAULT;
+                       instr2 = le16_to_cpu(instr_le);
+                       instr = (instr << 16) | instr2;
+               }
+@@ -329,10 +326,20 @@ static int call_undef_hook(struct pt_reg
+               /* 32-bit ARM instruction */
+               __le32 instr_le;
+               if (get_user(instr_le, (__le32 __user *)pc))
+-                      goto exit;
++                      return -EFAULT;
+               instr = le32_to_cpu(instr_le);
+       }
++      *insnp = instr;
++      return 0;
++}
++
++static int call_undef_hook(struct pt_regs *regs, u32 instr)
++{
++      struct undef_hook *hook;
++      unsigned long flags;
++      int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
++
+       raw_spin_lock_irqsave(&undef_lock, flags);
+       list_for_each_entry(hook, &undef_hook, node)
+               if ((instr & hook->instr_mask) == hook->instr_val &&
+@@ -340,7 +347,7 @@ static int call_undef_hook(struct pt_reg
+                       fn = hook->fn;
+       raw_spin_unlock_irqrestore(&undef_lock, flags);
+-exit:
++
+       return fn ? fn(regs, instr) : 1;
+ }
+@@ -392,13 +399,19 @@ void arm64_notify_segfault(unsigned long
+ void do_el0_undef(struct pt_regs *regs, unsigned long esr)
+ {
++      u32 insn;
++
+       /* check for AArch32 breakpoint instructions */
+       if (!aarch32_break_handler(regs))
+               return;
+-      if (call_undef_hook(regs) == 0)
++      if (user_insn_read(regs, &insn))
++              goto out_err;
++
++      if (call_undef_hook(regs, insn) == 0)
+               return;
++out_err:
+       force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ }
diff --git a/queue-5.10/arm64-factor-out-el1-ssbs-emulation-hook.patch b/queue-5.10/arm64-factor-out-el1-ssbs-emulation-hook.patch
new file mode 100644 (file)
index 0000000..68f137a
--- /dev/null
@@ -0,0 +1,166 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:07:07 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:38 +0000
+Subject: arm64: factor out EL1 SSBS emulation hook
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-9-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit bff8f413c71ffc3cb679dbd9a5632b33af563f9f upstream.
+
+Currently call_undef_hook() is used to handle UNDEFINED exceptions from
+EL0 and EL1. As support for deprecated instructions may be enabled
+independently, the handlers for individual instructions are organised as
+a linked list of struct undef_hook which can be manipulated dynamically.
+As this can be manipulated dynamically, the list is protected with a
+raw_spinlock which must be acquired when handling UNDEFINED exceptions
+or when manipulating the list of handlers.
+
+This locking is unfortunate as it serialises handling of UNDEFINED
+exceptions, and requires RCU to be enabled for lockdep, requiring the
+use of RCU_NONIDLE() in resume path of cpu_suspend() since commit:
+
+  a2c42bbabbe260b7 ("arm64: spectre: Prevent lockdep splat on v4 mitigation enable path")
+
+The list of UNDEFINED handlers largely consist of handlers for
+exceptions taken from EL0, and the only handler for exceptions taken
+from EL1 handles `MSR SSBS, #imm` on CPUs which feature PSTATE.SSBS but
+lack the corresponding MSR (Immediate) instruction. Other than this we
+never expect to take an UNDEFINED exception from EL1 in normal
+operation.
+
+This patch reworks do_el0_undef() to invoke the EL1 SSBS handler
+directly, relegating call_undef_hook() to only handle EL0 UNDEFs. This
+removes redundant work to iterate the list for EL1 UNDEFs, and removes
+the need for locking, permitting EL1 UNDEFs to be handled in parallel
+without contention.
+
+The RCU_NONIDLE() call in cpu_suspend() will be removed in a subsequent
+patch, as there are other potential issues with the use of
+instrumentable code and RCU in the CPU suspend code.
+
+I've tested this by forcing the detection of SSBS on a CPU that doesn't
+have it, and verifying that the try_emulate_el1_ssbs() callback is
+invoked.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Joey Gouly <joey.gouly@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20221019144123.612388-4-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/spectre.h |    2 ++
+ arch/arm64/kernel/proton-pack.c  |   26 +++++++-------------------
+ arch/arm64/kernel/traps.c        |   15 ++++++++-------
+ 3 files changed, 17 insertions(+), 26 deletions(-)
+
+--- a/arch/arm64/include/asm/spectre.h
++++ b/arch/arm64/include/asm/spectre.h
+@@ -18,6 +18,7 @@ enum mitigation_state {
+       SPECTRE_VULNERABLE,
+ };
++struct pt_regs;
+ struct task_struct;
+ enum mitigation_state arm64_get_spectre_v2_state(void);
+@@ -33,4 +34,5 @@ enum mitigation_state arm64_get_spectre_
+ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
+ u8 spectre_bhb_loop_affected(int scope);
+ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
++bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
+ #endif        /* __ASM_SPECTRE_H */
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -537,10 +537,13 @@ bool has_spectre_v4(const struct arm64_c
+       return state != SPECTRE_UNAFFECTED;
+ }
+-static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
++bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr)
+ {
+-      if (user_mode(regs))
+-              return 1;
++      const u32 instr_mask = ~(1U << PSTATE_Imm_shift);
++      const u32 instr_val = 0xd500401f | PSTATE_SSBS;
++
++      if ((instr & instr_mask) != instr_val)
++              return false;
+       if (instr & BIT(PSTATE_Imm_shift))
+               regs->pstate |= PSR_SSBS_BIT;
+@@ -548,19 +551,11 @@ static int ssbs_emulation_handler(struct
+               regs->pstate &= ~PSR_SSBS_BIT;
+       arm64_skip_faulting_instruction(regs, 4);
+-      return 0;
++      return true;
+ }
+-static struct undef_hook ssbs_emulation_hook = {
+-      .instr_mask     = ~(1U << PSTATE_Imm_shift),
+-      .instr_val      = 0xd500401f | PSTATE_SSBS,
+-      .fn             = ssbs_emulation_handler,
+-};
+-
+ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
+ {
+-      static bool undef_hook_registered = false;
+-      static DEFINE_RAW_SPINLOCK(hook_lock);
+       enum mitigation_state state;
+       /*
+@@ -571,13 +566,6 @@ static enum mitigation_state spectre_v4_
+       if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
+               return state;
+-      raw_spin_lock(&hook_lock);
+-      if (!undef_hook_registered) {
+-              register_undef_hook(&ssbs_emulation_hook);
+-              undef_hook_registered = true;
+-      }
+-      raw_spin_unlock(&hook_lock);
+-
+       if (spectre_v4_mitigations_off()) {
+               sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+               asm volatile(SET_PSTATE_SSBS(1));
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -311,12 +311,7 @@ static int call_undef_hook(struct pt_reg
+       int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
+       void __user *pc = (void __user *)instruction_pointer(regs);
+-      if (!user_mode(regs)) {
+-              __le32 instr_le;
+-              if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
+-                      goto exit;
+-              instr = le32_to_cpu(instr_le);
+-      } else if (compat_thumb_mode(regs)) {
++      if (compat_thumb_mode(regs)) {
+               /* 16-bit Thumb instruction */
+               __le16 instr_le;
+               if (get_user(instr_le, (__le16 __user *)pc))
+@@ -409,9 +404,15 @@ void do_el0_undef(struct pt_regs *regs,
+ void do_el1_undef(struct pt_regs *regs, unsigned long esr)
+ {
+-      if (call_undef_hook(regs) == 0)
++      u32 insn;
++
++      if (aarch64_insn_read((void *)regs->pc, &insn))
++              goto out_err;
++
++      if (try_emulate_el1_ssbs(regs, insn))
+               return;
++out_err:
+       die("Oops - Undefined instruction", regs, esr);
+ }
diff --git a/queue-5.10/arm64-report-el1-undefs-better.patch b/queue-5.10/arm64-report-el1-undefs-better.patch
new file mode 100644 (file)
index 0000000..52296cc
--- /dev/null
@@ -0,0 +1,113 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:28 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:31 +0000
+Subject: arm64: report EL1 UNDEFs better
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-2-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit b502c87d2a26c349acbc231ff2acd6f17147926b upstream.
+
+If an UNDEFINED exception is taken from EL1, and do_undefinstr() doesn't
+find any suitable undef_hook, it will call:
+
+       BUG_ON(!user_mode(regs))
+
+... and the kernel will report a failure within do_undefinstr() rather
+than reporting the original context that the UNDEFINED exception was
+taken from. The pt_regs and ESR value reported within the BUG() handler
+will be from within do_undefinstr() and the code dump will be for the
+BRK in BUG_ON(), which isn't sufficient to debug the cause of the
+original exception.
+
+This patch makes the reporting better by having do_undefinstr() call
+die() directly in this case to report the original context from which
+the UNDEFINED exception was taken.
+
+Prior to this patch, an undefined instruction is reported as:
+
+| kernel BUG at arch/arm64/kernel/traps.c:497!
+| Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
+| Modules linked in:
+| CPU: 0 PID: 0 Comm: swapper Not tainted 5.19.0-rc3-00127-geff044f1b04e-dirty #3
+| Hardware name: linux,dummy-virt (DT)
+| pstate: 000000c5 (nzcv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+| pc : do_undefinstr+0x28c/0x2ac
+| lr : do_undefinstr+0x298/0x2ac
+| sp : ffff800009f63bc0
+| x29: ffff800009f63bc0 x28: ffff800009f73c00 x27: ffff800009644a70
+| x26: ffff8000096778a8 x25: 0000000000000040 x24: 0000000000000000
+| x23: 00000000800000c5 x22: ffff800009894060 x21: ffff800009f63d90
+| x20: 0000000000000000 x19: ffff800009f63c40 x18: 0000000000000006
+| x17: 0000000000403000 x16: 00000000bfbfd000 x15: ffff800009f63830
+| x14: ffffffffffffffff x13: 0000000000000000 x12: 0000000000000019
+| x11: 0101010101010101 x10: 0000000000161b98 x9 : 0000000000000000
+| x8 : 0000000000000000 x7 : 0000000000000000 x6 : 0000000000000000
+| x5 : ffff800009f761d0 x4 : 0000000000000000 x3 : ffff80000a2b80f8
+| x2 : 0000000000000000 x1 : ffff800009f73c00 x0 : 00000000800000c5
+| Call trace:
+|  do_undefinstr+0x28c/0x2ac
+|  el1_undef+0x2c/0x4c
+|  el1h_64_sync_handler+0x84/0xd0
+|  el1h_64_sync+0x64/0x68
+|  setup_arch+0x550/0x598
+|  start_kernel+0x88/0x6ac
+|  __primary_switched+0xb8/0xc0
+| Code: 17ffff95 a9425bf5 17ffffb8 a9025bf5 (d4210000)
+
+With this patch applied, an undefined instruction is reported as:
+
+| Internal error: Oops - Undefined instruction: 0 [#1] PREEMPT SMP
+| Modules linked in:
+| CPU: 0 PID: 0 Comm: swapper Not tainted 5.19.0-rc3-00128-gf27cfcc80e52-dirty #5
+| Hardware name: linux,dummy-virt (DT)
+| pstate: 800000c5 (Nzcv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+| pc : setup_arch+0x550/0x598
+| lr : setup_arch+0x50c/0x598
+| sp : ffff800009f63d90
+| x29: ffff800009f63d90 x28: 0000000081000200 x27: ffff800009644a70
+| x26: ffff8000096778c8 x25: 0000000000000040 x24: 0000000000000000
+| x23: 0000000000000100 x22: ffff800009f69a58 x21: ffff80000a2b80b8
+| x20: 0000000000000000 x19: 0000000000000000 x18: 0000000000000006
+| x17: 0000000000403000 x16: 00000000bfbfd000 x15: ffff800009f63830
+| x14: ffffffffffffffff x13: 0000000000000000 x12: 0000000000000019
+| x11: 0101010101010101 x10: 0000000000161b98 x9 : 0000000000000000
+| x8 : 0000000000000000 x7 : 0000000000000000 x6 : 0000000000000000
+| x5 : 0000000000000008 x4 : 0000000000000010 x3 : 0000000000000000
+| x2 : 0000000000000000 x1 : 0000000000000000 x0 : 0000000000000000
+| Call trace:
+|  setup_arch+0x550/0x598
+|  start_kernel+0x88/0x6ac
+|  __primary_switched+0xb8/0xc0
+| Code: b4000080 90ffed80 912ac000 97db745f (00000000)
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Cc: Alexandru Elisei <alexandru.elisei@arm.com>
+Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Link: https://lore.kernel.org/r/20220913101732.3925290-2-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/traps.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -404,7 +404,9 @@ void do_undefinstr(struct pt_regs *regs)
+       if (call_undef_hook(regs) == 0)
+               return;
+-      BUG_ON(!user_mode(regs));
++      if (!user_mode(regs))
++              die("Oops - Undefined instruction", regs, 0);
++
+       force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ }
+ NOKPROBE_SYMBOL(do_undefinstr);
diff --git a/queue-5.10/arm64-rework-bti-exception-handling.patch b/queue-5.10/arm64-rework-bti-exception-handling.patch
new file mode 100644 (file)
index 0000000..08d266e
--- /dev/null
@@ -0,0 +1,185 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:32 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:35 +0000
+Subject: arm64: rework BTI exception handling
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-6-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 830a2a4d853f2c4a1e4606aa03341b7f273b0e9b upstream.
+
+If a BTI exception is taken from EL1, the entry code will treat this as
+an unhandled exception and will panic() the kernel. This is inconsistent
+with the way we handle FPAC exceptions, which have a dedicated handler
+and only necessarily kill the thread from which the exception was taken
+from, and we don't log all the information that could be relevant to
+debug the issue.
+
+The code in do_bti() has:
+
+       BUG_ON(!user_mode(regs));
+
+... and it seems like the intent was to call this for EL1 BTI
+exceptions, as with FPAC, but this was omitted due to an oversight.
+
+This patch adds separate EL0 and EL1 BTI exception handlers, with the
+latter calling die() directly to report the original context the BTI
+exception was taken from. This matches our handling of FPAC exceptions.
+
+Prior to this patch, a BTI failure is reported as:
+
+| Unhandled 64-bit el1h sync exception on CPU0, ESR 0x0000000034000002 -- BTI
+| CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00131-g7d937ff0221d-dirty #9
+| Hardware name: linux,dummy-virt (DT)
+| pstate: 20400809 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=-c)
+| pc : test_bti_callee+0x4/0x10
+| lr : test_bti_caller+0x1c/0x28
+| sp : ffff80000800bdf0
+| x29: ffff80000800bdf0 x28: 0000000000000000 x27: 0000000000000000
+| x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000
+| x23: ffff80000a2b8000 x22: 0000000000000000 x21: 0000000000000000
+| x20: ffff8000099fa5b0 x19: ffff800009ff7000 x18: fffffbfffda37000
+| x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000041a90000
+| x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000
+| x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000040000000
+| x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000f83
+| x5 : ffff80000a2b6000 x4 : ffff0000028d0000 x3 : ffff800009f78378
+| x2 : 0000000000000000 x1 : 0000000040210000 x0 : ffff8000080257e4
+| Kernel panic - not syncing: Unhandled exception
+| CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00131-g7d937ff0221d-dirty #9
+| Hardware name: linux,dummy-virt (DT)
+| Call trace:
+|  dump_backtrace.part.0+0xcc/0xe0
+|  show_stack+0x18/0x5c
+|  dump_stack_lvl+0x64/0x80
+|  dump_stack+0x18/0x34
+|  panic+0x170/0x360
+|  arm64_exit_nmi.isra.0+0x0/0x80
+|  el1h_64_sync_handler+0x64/0xd0
+|  el1h_64_sync+0x64/0x68
+|  test_bti_callee+0x4/0x10
+|  smp_cpus_done+0xb0/0xbc
+|  smp_init+0x7c/0x8c
+|  kernel_init_freeable+0x128/0x28c
+|  kernel_init+0x28/0x13c
+|  ret_from_fork+0x10/0x20
+
+With this patch applied, a BTI failure is reported as:
+
+| Internal error: Oops - BTI: 0000000034000002 [#1] PREEMPT SMP
+| Modules linked in:
+| CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00132-g0ad98265d582-dirty #8
+| Hardware name: linux,dummy-virt (DT)
+| pstate: 20400809 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=-c)
+| pc : test_bti_callee+0x4/0x10
+| lr : test_bti_caller+0x1c/0x28
+| sp : ffff80000800bdf0
+| x29: ffff80000800bdf0 x28: 0000000000000000 x27: 0000000000000000
+| x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000
+| x23: ffff80000a2b8000 x22: 0000000000000000 x21: 0000000000000000
+| x20: ffff8000099fa5b0 x19: ffff800009ff7000 x18: fffffbfffda37000
+| x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000041a90000
+| x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000
+| x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000040000000
+| x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000f83
+| x5 : ffff80000a2b6000 x4 : ffff0000028d0000 x3 : ffff800009f78378
+| x2 : 0000000000000000 x1 : 0000000040210000 x0 : ffff800008025804
+| Call trace:
+|  test_bti_callee+0x4/0x10
+|  smp_cpus_done+0xb0/0xbc
+|  smp_init+0x7c/0x8c
+|  kernel_init_freeable+0x128/0x28c
+|  kernel_init+0x28/0x13c
+|  ret_from_fork+0x10/0x20
+| Code: d50323bf d53cd040 d65f03c0 d503233f (d50323bf)
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Cc: Alexandru Elisei <alexandru.elisei@arm.com>
+Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20220913101732.3925290-6-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/exception.h |    3 ++-
+ arch/arm64/kernel/entry-common.c   |   14 +++++++++++++-
+ arch/arm64/kernel/traps.c          |   10 +++++++---
+ 3 files changed, 22 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/exception.h
++++ b/arch/arm64/include/asm/exception.h
+@@ -34,7 +34,8 @@ void arm64_enter_nmi(struct pt_regs *reg
+ void arm64_exit_nmi(struct pt_regs *regs);
+ void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+ void do_undefinstr(struct pt_regs *regs, unsigned long esr);
+-void do_bti(struct pt_regs *regs);
++void do_el0_bti(struct pt_regs *regs);
++void do_el1_bti(struct pt_regs *regs, unsigned long esr);
+ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
+ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+                       struct pt_regs *regs);
+--- a/arch/arm64/kernel/entry-common.c
++++ b/arch/arm64/kernel/entry-common.c
+@@ -141,6 +141,15 @@ static void noinstr el1_undef(struct pt_
+       exit_to_kernel_mode(regs);
+ }
++static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
++{
++      enter_from_kernel_mode(regs);
++      local_daif_inherit(regs);
++      do_el1_bti(regs, esr);
++      local_daif_mask();
++      exit_to_kernel_mode(regs);
++}
++
+ static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
+ {
+       enter_from_kernel_mode(regs);
+@@ -212,6 +221,9 @@ asmlinkage void noinstr el1_sync_handler
+       case ESR_ELx_EC_UNKNOWN:
+               el1_undef(regs, esr);
+               break;
++      case ESR_ELx_EC_BTI:
++              el1_bti(regs, esr);
++              break;
+       case ESR_ELx_EC_BREAKPT_CUR:
+       case ESR_ELx_EC_SOFTSTP_CUR:
+       case ESR_ELx_EC_WATCHPT_CUR:
+@@ -327,7 +339,7 @@ static void noinstr el0_bti(struct pt_re
+ {
+       enter_from_user_mode();
+       local_daif_restore(DAIF_PROCCTX);
+-      do_bti(regs);
++      do_el0_bti(regs);
+ }
+ static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -411,12 +411,16 @@ void do_undefinstr(struct pt_regs *regs,
+ }
+ NOKPROBE_SYMBOL(do_undefinstr);
+-void do_bti(struct pt_regs *regs)
++void do_el0_bti(struct pt_regs *regs)
+ {
+-      BUG_ON(!user_mode(regs));
+       force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ }
+-NOKPROBE_SYMBOL(do_bti);
++
++void do_el1_bti(struct pt_regs *regs, unsigned long esr)
++{
++      die("Oops - BTI", regs, esr);
++}
++NOKPROBE_SYMBOL(do_el1_bti);
+ void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
+ {
diff --git a/queue-5.10/arm64-rework-el0-mrs-emulation.patch b/queue-5.10/arm64-rework-el0-mrs-emulation.patch
new file mode 100644 (file)
index 0000000..a25b605
--- /dev/null
@@ -0,0 +1,144 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:38 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:40 +0000
+Subject: arm64: rework EL0 MRS emulation
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-11-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit f5962add74b61f8ae31c6311f75ca35d7e1d2d8f upstream.
+
+On CPUs without FEAT_IDST, ID register emulation is slower than it needs
+to be, as all threads contend for the same lock to perform the
+emulation. This patch reworks the emulation to avoid this unnecessary
+contention.
+
+On CPUs with FEAT_IDST (which is mandatory from ARMv8.4 onwards), EL0
+accesses to ID registers result in a SYS trap, and emulation of these is
+handled with a sys64_hook. These hooks are statically allocated, and no
+locking is required to iterate through the hooks and perform the
+emulation, allowing emulation to occur in parallel with no contention.
+
+On CPUs without FEAT_IDST, EL0 accesses to ID registers result in an
+UNDEFINED exception, and emulation of these accesses is handled with an
+undef_hook. When an EL0 MRS instruction is trapped to EL1, the kernel
+finds the relevant handler by iterating through all of the undef_hooks,
+requiring undef_lock to be held during this lookup.
+
+This locking is only required to safely traverse the list of undef_hooks
+(as it can be concurrently modified), and the actual emulation of the
+MRS does not require any mutual exclusion. This locking is an
+unfortunate bottleneck, especially given that MRS emulation is enabled
+unconditionally and is never disabled.
+
+This patch reworks the non-FEAT_IDST MRS emulation logic so that it can
+be invoked directly from do_el0_undef(). This removes the bottleneck,
+allowing MRS traps to be handled entirely in parallel, and is a stepping
+stone to making all of the undef_hooks lock-free.
+
+I've tested this in a 64-vCPU VM on a 64-CPU ThunderX2 host, with a
+benchmark which spawns a number of threads which each try to read
+ID_AA64ISAR0_EL1 1000000 times. This is vastly more contention than will
+ever be seen in realistic usage, but clearly demonstrates the removal of
+the bottleneck:
+
+  | Threads || Time (seconds)                       |
+  |         || Before           || After            |
+  |         || Real   | System  || Real   | System  |
+  |---------++--------+---------++--------+---------|
+  |       1 ||   0.29 |    0.20 ||   0.24 |    0.12 |
+  |       2 ||   0.35 |    0.51 ||   0.23 |    0.27 |
+  |       4 ||   1.08 |    3.87 ||   0.24 |    0.56 |
+  |       8 ||   4.31 |   33.60 ||   0.24 |    1.11 |
+  |      16 ||   9.47 |  149.39 ||   0.23 |    2.15 |
+  |      32 ||  19.07 |  605.27 ||   0.24 |    4.38 |
+  |      64 ||  65.40 | 3609.09 ||   0.33 |   11.27 |
+
+Aside from the speedup, there should be no functional change as a result
+of this patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Joey Gouly <joey.gouly@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20221019144123.612388-6-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    3 ++-
+ arch/arm64/kernel/cpufeature.c      |   23 +++++------------------
+ arch/arm64/kernel/traps.c           |    3 +++
+ 3 files changed, 10 insertions(+), 19 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -759,7 +759,8 @@ static inline bool system_supports_tlb_r
+               cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
+ }
+-extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
++int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
++bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
+ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
+ {
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2852,35 +2852,22 @@ int do_emulate_mrs(struct pt_regs *regs,
+       return rc;
+ }
+-static int emulate_mrs(struct pt_regs *regs, u32 insn)
++bool try_emulate_mrs(struct pt_regs *regs, u32 insn)
+ {
+       u32 sys_reg, rt;
++      if (compat_user_mode(regs) || !aarch64_insn_is_mrs(insn))
++              return false;
++
+       /*
+        * sys_reg values are defined as used in mrs/msr instruction.
+        * shift the imm value to get the encoding.
+        */
+       sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
+       rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+-      return do_emulate_mrs(regs, sys_reg, rt);
+-}
+-
+-static struct undef_hook mrs_hook = {
+-      .instr_mask = 0xfff00000,
+-      .instr_val  = 0xd5300000,
+-      .pstate_mask = PSR_AA32_MODE_MASK,
+-      .pstate_val = PSR_MODE_EL0t,
+-      .fn = emulate_mrs,
+-};
+-
+-static int __init enable_mrs_emulation(void)
+-{
+-      register_undef_hook(&mrs_hook);
+-      return 0;
++      return do_emulate_mrs(regs, sys_reg, rt) == 0;
+ }
+-core_initcall(enable_mrs_emulation);
+-
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+ {
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -408,6 +408,9 @@ void do_el0_undef(struct pt_regs *regs,
+       if (user_insn_read(regs, &insn))
+               goto out_err;
++      if (try_emulate_mrs(regs, insn))
++              return;
++
+       if (call_undef_hook(regs, insn) == 0)
+               return;
diff --git a/queue-5.10/arm64-rework-fpac-exception-handling.patch b/queue-5.10/arm64-rework-fpac-exception-handling.patch
new file mode 100644 (file)
index 0000000..2f5e721
--- /dev/null
@@ -0,0 +1,171 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:31 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:34 +0000
+Subject: arm64: rework FPAC exception handling
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-5-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit a1fafa3b24a70461bbf3e5c0770893feb0a49292 upstream.
+
+If an FPAC exception is taken from EL1, the entry code will call
+do_ptrauth_fault(), where due to:
+
+       BUG_ON(!user_mode(regs))
+
+... the kernel will report a problem within do_ptrauth_fault() rather
+than reporting the original context the FPAC exception was taken from.
+The pt_regs and ESR value reported will be from within
+do_ptrauth_fault() and the code dump will be for the BRK in BUG_ON(),
+which isn't sufficient to debug the cause of the original exception.
+
+This patch makes the reporting better by having separate EL0 and EL1
+FPAC exception handlers, with the latter calling die() directly to
+report the original context the FPAC exception was taken from.
+
+Note that we only need to prevent kprobes of the EL1 FPAC handler, since
+the EL0 FPAC handler cannot be called recursively.
+
+For consistency with do_el0_svc*(), I've named the split functions
+do_el{0,1}_fpac() rather than do_el{0,1}_ptrauth_fault(). I've also
+clarified the comment to not imply there are casues other than FPAC
+exceptions.
+
+Prior to this patch FPAC exceptions are reported as:
+
+| kernel BUG at arch/arm64/kernel/traps.c:517!
+| Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP
+| Modules linked in:
+| CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00130-g9c8a180a1cdf-dirty #12
+| Hardware name: FVP Base RevC (DT)
+| pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+| pc : do_ptrauth_fault+0x3c/0x40
+| lr : el1_fpac+0x34/0x54
+| sp : ffff80000a3bbc80
+| x29: ffff80000a3bbc80 x28: ffff0008001d8000 x27: 0000000000000000
+| x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000
+| x23: 0000000020400009 x22: ffff800008f70fa4 x21: ffff80000a3bbe00
+| x20: 0000000072000000 x19: ffff80000a3bbcb0 x18: fffffbfffda37000
+| x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000081a90000
+| x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000
+| x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000080000000
+| x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000783
+| x5 : ffff80000a3bbcb0 x4 : ffff0008001d8000 x3 : 0000000072000000
+| x2 : 0000000000000000 x1 : 0000000020400009 x0 : ffff80000a3bbcb0
+| Call trace:
+|  do_ptrauth_fault+0x3c/0x40
+|  el1h_64_sync_handler+0xc4/0xd0
+|  el1h_64_sync+0x64/0x68
+|  test_pac+0x8/0x10
+|  smp_init+0x7c/0x8c
+|  kernel_init_freeable+0x128/0x28c
+|  kernel_init+0x28/0x13c
+|  ret_from_fork+0x10/0x20
+| Code: 97fffe5e a8c17bfd d50323bf d65f03c0 (d4210000)
+
+With this patch applied FPAC exceptions are reported as:
+
+| Internal error: Oops - FPAC: 0000000072000000 [#1] PREEMPT SMP
+| Modules linked in:
+| CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00132-g78846e1c4757-dirty #11
+| Hardware name: FVP Base RevC (DT)
+| pstate: 20400009 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+| pc : test_pac+0x8/0x10
+| lr : 0x0
+| sp : ffff80000a3bbe00
+| x29: ffff80000a3bbe00 x28: 0000000000000000 x27: 0000000000000000
+| x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000
+| x23: ffff80000a2c8000 x22: 0000000000000000 x21: 0000000000000000
+| x20: ffff8000099fa5b0 x19: ffff80000a007000 x18: fffffbfffda37000
+| x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000081a90000
+| x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000
+| x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000080000000
+| x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000783
+| x5 : ffff80000a2c6000 x4 : ffff0008001d8000 x3 : ffff800009f88378
+| x2 : 0000000000000000 x1 : 0000000080210000 x0 : ffff000001a90000
+| Call trace:
+|  test_pac+0x8/0x10
+|  smp_init+0x7c/0x8c
+|  kernel_init_freeable+0x128/0x28c
+|  kernel_init+0x28/0x13c
+|  ret_from_fork+0x10/0x20
+| Code: d50323bf d65f03c0 d503233f aa1f03fe (d50323bf)
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Cc: Alexandru Elisei <alexandru.elisei@arm.com>
+Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20220913101732.3925290-5-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/exception.h |    3 ++-
+ arch/arm64/kernel/entry-common.c   |    4 ++--
+ arch/arm64/kernel/traps.c          |   16 ++++++++++------
+ 3 files changed, 14 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/include/asm/exception.h
++++ b/arch/arm64/include/asm/exception.h
+@@ -47,5 +47,6 @@ void bad_el0_sync(struct pt_regs *regs,
+ void do_cp15instr(unsigned int esr, struct pt_regs *regs);
+ void do_el0_svc(struct pt_regs *regs);
+ void do_el0_svc_compat(struct pt_regs *regs);
+-void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
++void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
++void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
+ #endif        /* __ASM_EXCEPTION_H */
+--- a/arch/arm64/kernel/entry-common.c
++++ b/arch/arm64/kernel/entry-common.c
+@@ -187,7 +187,7 @@ static void noinstr el1_fpac(struct pt_r
+ {
+       enter_from_kernel_mode(regs);
+       local_daif_inherit(regs);
+-      do_ptrauth_fault(regs, esr);
++      do_el1_fpac(regs, esr);
+       local_daif_mask();
+       exit_to_kernel_mode(regs);
+ }
+@@ -357,7 +357,7 @@ static void noinstr el0_fpac(struct pt_r
+ {
+       enter_from_user_mode();
+       local_daif_restore(DAIF_PROCCTX);
+-      do_ptrauth_fault(regs, esr);
++      do_el0_fpac(regs, esr);
+ }
+ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -418,16 +418,20 @@ void do_bti(struct pt_regs *regs)
+ }
+ NOKPROBE_SYMBOL(do_bti);
+-void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
++void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
++{
++      force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
++}
++
++void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
+ {
+       /*
+-       * Unexpected FPAC exception or pointer authentication failure in
+-       * the kernel: kill the task before it does any more harm.
++       * Unexpected FPAC exception in the kernel: kill the task before it
++       * does any more harm.
+        */
+-      BUG_ON(!user_mode(regs));
+-      force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
++      die("Oops - FPAC", regs, esr);
+ }
+-NOKPROBE_SYMBOL(do_ptrauth_fault);
++NOKPROBE_SYMBOL(do_el1_fpac);
+ #define __user_cache_maint(insn, address, res)                        \
+       if (address >= user_addr_max()) {                       \
diff --git a/queue-5.10/arm64-split-el0-el1-undef-handlers.patch b/queue-5.10/arm64-split-el0-el1-undef-handlers.patch
new file mode 100644 (file)
index 0000000..9c2b1a7
--- /dev/null
@@ -0,0 +1,177 @@
+From ruanjinjie@huawei.com Wed Oct 11 12:06:35 2023
+From: Jinjie Ruan <ruanjinjie@huawei.com>
+Date: Wed, 11 Oct 2023 10:05:37 +0000
+Subject: arm64: split EL0/EL1 UNDEF handlers
+To: <catalin.marinas@arm.com>, <will@kernel.org>, <yuzenghui@huawei.com>, <anshuman.khandual@arm.com>, <gregkh@linuxfoundation.org>, <mark.rutland@arm.com>, <broonie@kernel.org>, <youngmin.nam@samsung.com>, <ardb@kernel.org>, <f.fainelli@gmail.com>, <james.morse@arm.com>, <sashal@kernel.org>, <scott@os.amperecomputing.com>, <ebiederm@xmission.com>, <haibinzhang@tencent.com>, <hewenliang4@huawei.com>, <linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>, <stable@kernel.org>
+Cc: <ruanjinjie@huawei.com>
+Message-ID: <20231011100545.979577-8-ruanjinjie@huawei.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 61d64a376ea80f9097e7ea599bcd68671b836dc6 upstream.
+
+In general, exceptions taken from EL1 need to be handled separately from
+exceptions taken from EL0, as the logic to handle the two cases can be
+significantly divergent, and exceptions taken from EL1 typically have
+more stringent requirements on locking and instrumentation.
+
+Subsequent patches will rework the way EL1 UNDEFs are handled in order
+to address longstanding soundness issues with instrumentation and RCU.
+In preparation for that rework, this patch splits the existing
+do_undefinstr() handler into separate do_el0_undef() and do_el1_undef()
+handlers.
+
+Prior to this patch, do_undefinstr() was marked with NOKPROBE_SYMBOL(),
+preventing instrumentation via kprobes. However, do_undefinstr() invokes
+other code which can be instrumented, and:
+
+* For UNDEFINED exceptions taken from EL0, there is no risk of recursion
+  within kprobes. Therefore it is safe for do_el0_undef to be
+  instrumented with kprobes, and it does not need to be marked with
+  NOKPROBE_SYMBOL().
+
+* For UNDEFINED exceptions taken from EL1, either:
+
+  (a) The exception is has been taken when manipulating SSBS; these cases
+      are limited and do not occur within code that can be invoked
+      recursively via kprobes. Hence, in these cases instrumentation
+      with kprobes is benign.
+
+  (b) The exception has been taken for an unknown reason, as other than
+      manipulating SSBS we do not expect to take UNDEFINED exceptions
+      from EL1. Any handling of these exception is best-effort.
+
+  ... and in either case, marking do_el1_undef() with NOKPROBE_SYMBOL()
+  isn't sufficient to prevent recursion via kprobes as functions it
+  calls (including die()) are instrumentable via kprobes.
+
+  Hence, it's not worthwhile to mark do_el1_undef() with
+  NOKPROBE_SYMBOL(). The same applies to do_el1_bti() and do_el1_fpac(),
+  so their NOKPROBE_SYMBOL() annotations are also removed.
+
+Aside from the new instrumentability, there should be no functional
+change as a result of this patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Joey Gouly <joey.gouly@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20221019144123.612388-3-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/exception.h |    3 ++-
+ arch/arm64/kernel/entry-common.c   |    4 ++--
+ arch/arm64/kernel/traps.c          |   22 ++++++++++++----------
+ 3 files changed, 16 insertions(+), 13 deletions(-)
+
+--- a/arch/arm64/include/asm/exception.h
++++ b/arch/arm64/include/asm/exception.h
+@@ -33,7 +33,8 @@ asmlinkage void exit_to_user_mode(void);
+ void arm64_enter_nmi(struct pt_regs *regs);
+ void arm64_exit_nmi(struct pt_regs *regs);
+ void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+-void do_undefinstr(struct pt_regs *regs, unsigned long esr);
++void do_el0_undef(struct pt_regs *regs, unsigned long esr);
++void do_el1_undef(struct pt_regs *regs, unsigned long esr);
+ void do_el0_bti(struct pt_regs *regs);
+ void do_el1_bti(struct pt_regs *regs, unsigned long esr);
+ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
+--- a/arch/arm64/kernel/entry-common.c
++++ b/arch/arm64/kernel/entry-common.c
+@@ -136,7 +136,7 @@ static void noinstr el1_undef(struct pt_
+ {
+       enter_from_kernel_mode(regs);
+       local_daif_inherit(regs);
+-      do_undefinstr(regs, esr);
++      do_el1_undef(regs, esr);
+       local_daif_mask();
+       exit_to_kernel_mode(regs);
+ }
+@@ -332,7 +332,7 @@ static void noinstr el0_undef(struct pt_
+ {
+       enter_from_user_mode();
+       local_daif_restore(DAIF_PROCCTX);
+-      do_undefinstr(regs, esr);
++      do_el0_undef(regs, esr);
+ }
+ static void noinstr el0_bti(struct pt_regs *regs)
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -395,7 +395,7 @@ void arm64_notify_segfault(unsigned long
+       force_signal_inject(SIGSEGV, code, addr, 0);
+ }
+-void do_undefinstr(struct pt_regs *regs, unsigned long esr)
++void do_el0_undef(struct pt_regs *regs, unsigned long esr)
+ {
+       /* check for AArch32 breakpoint instructions */
+       if (!aarch32_break_handler(regs))
+@@ -404,12 +404,16 @@ void do_undefinstr(struct pt_regs *regs,
+       if (call_undef_hook(regs) == 0)
+               return;
+-      if (!user_mode(regs))
+-              die("Oops - Undefined instruction", regs, esr);
+-
+       force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ }
+-NOKPROBE_SYMBOL(do_undefinstr);
++
++void do_el1_undef(struct pt_regs *regs, unsigned long esr)
++{
++      if (call_undef_hook(regs) == 0)
++              return;
++
++      die("Oops - Undefined instruction", regs, esr);
++}
+ void do_el0_bti(struct pt_regs *regs)
+ {
+@@ -420,7 +424,6 @@ void do_el1_bti(struct pt_regs *regs, un
+ {
+       die("Oops - BTI", regs, esr);
+ }
+-NOKPROBE_SYMBOL(do_el1_bti);
+ void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
+ {
+@@ -435,7 +438,6 @@ void do_el1_fpac(struct pt_regs *regs, u
+        */
+       die("Oops - FPAC", regs, esr);
+ }
+-NOKPROBE_SYMBOL(do_el1_fpac);
+ #define __user_cache_maint(insn, address, res)                        \
+       if (address >= user_addr_max()) {                       \
+@@ -671,7 +673,7 @@ void do_el0_cp15(unsigned long esr, stru
+               hook_base = cp15_64_hooks;
+               break;
+       default:
+-              do_undefinstr(regs, esr);
++              do_el0_undef(regs, esr);
+               return;
+       }
+@@ -686,7 +688,7 @@ void do_el0_cp15(unsigned long esr, stru
+        * EL0. Fall back to our usual undefined instruction handler
+        * so that we handle these consistently.
+        */
+-      do_undefinstr(regs, esr);
++      do_el0_undef(regs, esr);
+ }
+ #endif
+@@ -705,7 +707,7 @@ void do_el0_sys(unsigned long esr, struc
+        * back to our usual undefined instruction handler so that we handle
+        * these consistently.
+        */
+-      do_undefinstr(regs, esr);
++      do_el0_undef(regs, esr);
+ }
+ static const char *esr_class_str[] = {
index dd1d6802960777a0091ab4583ac8d1e10adbb163..737a62b552e807f056a158b9d4c73b40ee60289d 100644 (file)
@@ -63,3 +63,18 @@ dmaengine-mediatek-fix-deadlock-caused-by-synchroniz.patch
 powerpc-8xx-fix-pte_access_permitted-for-page_none.patch
 powerpc-64e-fix-wrong-test-in-__ptep_test_and_clear_.patch
 x86-alternatives-disable-kasan-in-apply_alternatives.patch
+arm64-report-el1-undefs-better.patch
+arm64-die-pass-err-as-long.patch
+arm64-consistently-pass-esr_elx-to-die.patch
+arm64-rework-fpac-exception-handling.patch
+arm64-rework-bti-exception-handling.patch
+arm64-allow-kprobes-on-el0-handlers.patch
+arm64-split-el0-el1-undef-handlers.patch
+arm64-factor-out-el1-ssbs-emulation-hook.patch
+arm64-factor-insn-read-out-of-call_undef_hook.patch
+arm64-rework-el0-mrs-emulation.patch
+arm64-armv8_deprecated-fold-ops-into-insn_emulation.patch
+arm64-armv8_deprecated-move-emulation-functions.patch
+arm64-armv8_deprecated-move-aarch32-helper-earlier.patch
+arm64-armv8_deprecated-rework-deprected-instruction-handling.patch
+arm64-armv8_deprecated-fix-unused-function-error.patch