git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Mar 2022 19:34:53 +0000 (20:34 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Mar 2022 19:34:53 +0000 (20:34 +0100)
added patches:
arm-early-traps-initialisation.patch
arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch
arm-report-spectre-v2-status-through-sysfs.patch
arm-spectre-bhb-workaround.patch
arm-use-loadaddr-to-get-load-address-of-sections.patch

queue-5.15/arm-early-traps-initialisation.patch [new file with mode: 0644]
queue-5.15/arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch [new file with mode: 0644]
queue-5.15/arm-report-spectre-v2-status-through-sysfs.patch [new file with mode: 0644]
queue-5.15/arm-spectre-bhb-workaround.patch [new file with mode: 0644]
queue-5.15/arm-use-loadaddr-to-get-load-address-of-sections.patch [new file with mode: 0644]
queue-5.15/series

diff --git a/queue-5.15/arm-early-traps-initialisation.patch b/queue-5.15/arm-early-traps-initialisation.patch
new file mode 100644
index 0000000..276aca7
--- /dev/null
+++ b/queue-5.15/arm-early-traps-initialisation.patch
@@ -0,0 +1,71 @@
+From foo@baz Tue Mar  8 08:32:37 PM CET 2022
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Fri, 11 Feb 2022 19:46:15 +0000
+Subject: ARM: early traps initialisation
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+commit 04e91b7324760a377a725e218b5ee783826d30f5 upstream.
+
+Provide a couple of helpers to copy the vectors and stubs, and also
+to flush the copied vectors and stubs.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/traps.c |   27 +++++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 6 deletions(-)
+
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -787,10 +787,22 @@ static inline void __init kuser_init(voi
+ }
+ #endif
++#ifndef CONFIG_CPU_V7M
++static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
++{
++      memcpy(vma, lma_start, lma_end - lma_start);
++}
++
++static void flush_vectors(void *vma, size_t offset, size_t size)
++{
++      unsigned long start = (unsigned long)vma + offset;
++      unsigned long end = start + size;
++
++      flush_icache_range(start, end);
++}
++
+ void __init early_trap_init(void *vectors_base)
+ {
+-#ifndef CONFIG_CPU_V7M
+-      unsigned long vectors = (unsigned long)vectors_base;
+       extern char __stubs_start[], __stubs_end[];
+       extern char __vectors_start[], __vectors_end[];
+       unsigned i;
+@@ -811,17 +823,20 @@ void __init early_trap_init(void *vector
+        * into the vector page, mapped at 0xffff0000, and ensure these
+        * are visible to the instruction stream.
+        */
+-      memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+-      memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
++      copy_from_lma(vectors_base, __vectors_start, __vectors_end);
++      copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
+       kuser_init(vectors_base);
+-      flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
++      flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
++}
+ #else /* ifndef CONFIG_CPU_V7M */
++void __init early_trap_init(void *vectors_base)
++{
+       /*
+        * on V7-M there is no need to copy the vector table to a dedicated
+        * memory area. The address is configurable and so a table in the kernel
+        * image can be used.
+        */
+-#endif
+ }
++#endif
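
A hedged sketch (not part of the patch above) of how these helpers are meant to be reused: the Spectre-BHB patch later in this series calls them from spectre_bhb_update_vectors() to install an alternative vector table. The wrapper name below is illustrative; the symbols and helper signatures come from the patches in this queue.

/*
 * Illustrative only: mirrors the copy_from_lma()/flush_vectors() usage
 * added by arm-spectre-bhb-workaround.patch further down. Assumes the
 * linker-provided start/end symbols from that patch and the vectors_page
 * pointer used by arch/arm/kernel/traps.c.
 */
extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];

static void example_install_bhb_vectors(void *vectors_page)
{
	copy_from_lma(vectors_page, __vectors_bhb_loop8_start,
		      __vectors_bhb_loop8_end);
	flush_vectors(vectors_page, 0,
		      __vectors_bhb_loop8_end - __vectors_bhb_loop8_start);
}
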
diff --git a/queue-5.15/arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch b/queue-5.15/arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch
new file mode 100644
index 0000000..0c7f2c7
--- /dev/null
+++ b/queue-5.15/arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch
@@ -0,0 +1,54 @@
+From 25875aa71dfefd1959f07e626c4d285b88b27ac2 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Mon, 7 Mar 2022 19:28:32 +0000
+Subject: ARM: include unprivileged BPF status in Spectre V2 reporting
+
+From: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+
+commit 25875aa71dfefd1959f07e626c4d285b88b27ac2 upstream.
+
+The mitigations for Spectre-BHB are only applied when an exception
+is taken, but when unprivileged BPF is enabled, userspace can
+load BPF programs that can be used to exploit the problem.
+
+When unprivileged BPF is enabled, report the vulnerable status via
+the spectre_v2 sysfs file.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/spectre.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/arm/kernel/spectre.c
++++ b/arch/arm/kernel/spectre.c
+@@ -1,9 +1,19 @@
+ // SPDX-License-Identifier: GPL-2.0-only
++#include <linux/bpf.h>
+ #include <linux/cpu.h>
+ #include <linux/device.h>
+ #include <asm/spectre.h>
++static bool _unprivileged_ebpf_enabled(void)
++{
++#ifdef CONFIG_BPF_SYSCALL
++      return !sysctl_unprivileged_bpf_disabled;
++#else
++      return false;
++#endif
++}
++
+ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+ {
+@@ -31,6 +41,9 @@ ssize_t cpu_show_spectre_v2(struct devic
+       if (spectre_v2_state != SPECTRE_MITIGATED)
+               return sprintf(buf, "%s\n", "Vulnerable");
++      if (_unprivileged_ebpf_enabled())
++              return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
++
+       switch (spectre_v2_methods) {
+       case SPECTRE_V2_METHOD_BPIALL:
+               method = "Branch predictor hardening";
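
A hedged userspace check (not part of the patch) for observing the behaviour added above: it reads the spectre_v2 vulnerabilities file and the unprivileged_bpf_disabled sysctl; "Vulnerable: Unprivileged eBPF enabled" is the string emitted by the hunk above when the sysctl is 0.

/* Minimal sketch: read the sysfs report that this patch extends. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f;

	f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");
	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("spectre_v2: %s", line);
		fclose(f);
	}

	/* 0 means unprivileged BPF is allowed, which the patch above now
	 * reports as "Vulnerable: Unprivileged eBPF enabled". */
	f = fopen("/proc/sys/kernel/unprivileged_bpf_disabled", "r");
	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("unprivileged_bpf_disabled: %s", line);
		fclose(f);
	}

	return 0;
}
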
diff --git a/queue-5.15/arm-report-spectre-v2-status-through-sysfs.patch b/queue-5.15/arm-report-spectre-v2-status-through-sysfs.patch
new file mode 100644
index 0000000..6f8c059
--- /dev/null
+++ b/queue-5.15/arm-report-spectre-v2-status-through-sysfs.patch
@@ -0,0 +1,345 @@
+From foo@baz Tue Mar  8 08:32:37 PM CET 2022
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Fri, 11 Feb 2022 16:45:54 +0000
+Subject: ARM: report Spectre v2 status through sysfs
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+commit 9dd78194a3722fa6712192cdd4f7032d45112a9a upstream.
+
+As per other architectures, add support for reporting the Spectre
+vulnerability status via sysfs CPU.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/spectre.h |   28 ++++++++
+ arch/arm/kernel/Makefile       |    2 
+ arch/arm/kernel/spectre.c      |   54 +++++++++++++++
+ arch/arm/mm/Kconfig            |    1 
+ arch/arm/mm/proc-v7-bugs.c     |  141 +++++++++++++++++++++++++++++------------
+ 5 files changed, 187 insertions(+), 39 deletions(-)
+ create mode 100644 arch/arm/include/asm/spectre.h
+ create mode 100644 arch/arm/kernel/spectre.c
+
+--- /dev/null
++++ b/arch/arm/include/asm/spectre.h
+@@ -0,0 +1,28 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef __ASM_SPECTRE_H
++#define __ASM_SPECTRE_H
++
++enum {
++      SPECTRE_UNAFFECTED,
++      SPECTRE_MITIGATED,
++      SPECTRE_VULNERABLE,
++};
++
++enum {
++      __SPECTRE_V2_METHOD_BPIALL,
++      __SPECTRE_V2_METHOD_ICIALLU,
++      __SPECTRE_V2_METHOD_SMC,
++      __SPECTRE_V2_METHOD_HVC,
++};
++
++enum {
++      SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
++      SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
++      SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
++      SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
++};
++
++void spectre_v2_update_state(unsigned int state, unsigned int methods);
++
++#endif
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -106,4 +106,6 @@ endif
+ obj-$(CONFIG_HAVE_ARM_SMCCC)  += smccc-call.o
++obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
++
+ extra-y := $(head-y) vmlinux.lds
+--- /dev/null
++++ b/arch/arm/kernel/spectre.c
+@@ -0,0 +1,54 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#include <linux/cpu.h>
++#include <linux/device.h>
++
++#include <asm/spectre.h>
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++static unsigned int spectre_v2_state;
++static unsigned int spectre_v2_methods;
++
++void spectre_v2_update_state(unsigned int state, unsigned int method)
++{
++      if (state > spectre_v2_state)
++              spectre_v2_state = state;
++      spectre_v2_methods |= method;
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      const char *method;
++
++      if (spectre_v2_state == SPECTRE_UNAFFECTED)
++              return sprintf(buf, "%s\n", "Not affected");
++
++      if (spectre_v2_state != SPECTRE_MITIGATED)
++              return sprintf(buf, "%s\n", "Vulnerable");
++
++      switch (spectre_v2_methods) {
++      case SPECTRE_V2_METHOD_BPIALL:
++              method = "Branch predictor hardening";
++              break;
++
++      case SPECTRE_V2_METHOD_ICIALLU:
++              method = "I-cache invalidation";
++              break;
++
++      case SPECTRE_V2_METHOD_SMC:
++      case SPECTRE_V2_METHOD_HVC:
++              method = "Firmware call";
++              break;
++
++      default:
++              method = "Multiple mitigations";
++              break;
++      }
++
++      return sprintf(buf, "Mitigation: %s\n", method);
++}
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -830,6 +830,7 @@ config CPU_BPREDICT_DISABLE
+ config CPU_SPECTRE
+       bool
++      select GENERIC_CPU_VULNERABILITIES
+ config HARDEN_BRANCH_PREDICTOR
+       bool "Harden the branch predictor against aliasing attacks" if EXPERT
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -6,8 +6,35 @@
+ #include <asm/cp15.h>
+ #include <asm/cputype.h>
+ #include <asm/proc-fns.h>
++#include <asm/spectre.h>
+ #include <asm/system_misc.h>
++#ifdef CONFIG_ARM_PSCI
++static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
++{
++      struct arm_smccc_res res;
++
++      arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                           ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++
++      switch ((int)res.a0) {
++      case SMCCC_RET_SUCCESS:
++              return SPECTRE_MITIGATED;
++
++      case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
++              return SPECTRE_UNAFFECTED;
++
++      default:
++              return SPECTRE_VULNERABLE;
++      }
++}
++#else
++static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
++{
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+@@ -36,13 +63,60 @@ static void __maybe_unused call_hvc_arch
+       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+ }
+-static void cpu_v7_spectre_init(void)
++static unsigned int spectre_v2_install_workaround(unsigned int method)
+ {
+       const char *spectre_v2_method = NULL;
+       int cpu = smp_processor_id();
+       if (per_cpu(harden_branch_predictor_fn, cpu))
+-              return;
++              return SPECTRE_MITIGATED;
++
++      switch (method) {
++      case SPECTRE_V2_METHOD_BPIALL:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_bpiall;
++              spectre_v2_method = "BPIALL";
++              break;
++
++      case SPECTRE_V2_METHOD_ICIALLU:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_iciallu;
++              spectre_v2_method = "ICIALLU";
++              break;
++
++      case SPECTRE_V2_METHOD_HVC:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      call_hvc_arch_workaround_1;
++              cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
++              spectre_v2_method = "hypervisor";
++              break;
++
++      case SPECTRE_V2_METHOD_SMC:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      call_smc_arch_workaround_1;
++              cpu_do_switch_mm = cpu_v7_smc_switch_mm;
++              spectre_v2_method = "firmware";
++              break;
++      }
++
++      if (spectre_v2_method)
++              pr_info("CPU%u: Spectre v2: using %s workaround\n",
++                      smp_processor_id(), spectre_v2_method);
++
++      return SPECTRE_MITIGATED;
++}
++#else
++static unsigned int spectre_v2_install_workaround(unsigned int method)
++{
++      pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n");
++
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
++static void cpu_v7_spectre_v2_init(void)
++{
++      unsigned int state, method = 0;
+       switch (read_cpuid_part()) {
+       case ARM_CPU_PART_CORTEX_A8:
+@@ -51,68 +125,57 @@ static void cpu_v7_spectre_init(void)
+       case ARM_CPU_PART_CORTEX_A17:
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
+-              per_cpu(harden_branch_predictor_fn, cpu) =
+-                      harden_branch_predictor_bpiall;
+-              spectre_v2_method = "BPIALL";
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_BPIALL;
+               break;
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
+-              per_cpu(harden_branch_predictor_fn, cpu) =
+-                      harden_branch_predictor_iciallu;
+-              spectre_v2_method = "ICIALLU";
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_ICIALLU;
+               break;
+-#ifdef CONFIG_ARM_PSCI
+       case ARM_CPU_PART_BRAHMA_B53:
+               /* Requires no workaround */
++              state = SPECTRE_UNAFFECTED;
+               break;
++
+       default:
+               /* Other ARM CPUs require no workaround */
+-              if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
++              if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
++                      state = SPECTRE_UNAFFECTED;
+                       break;
++              }
++
+               fallthrough;
+-              /* Cortex A57/A72 require firmware workaround */
+-      case ARM_CPU_PART_CORTEX_A57:
+-      case ARM_CPU_PART_CORTEX_A72: {
+-              struct arm_smccc_res res;
+-              arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+-                                   ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+-              if ((int)res.a0 != 0)
+-                      return;
++      /* Cortex A57/A72 require firmware workaround */
++      case ARM_CPU_PART_CORTEX_A57:
++      case ARM_CPU_PART_CORTEX_A72:
++              state = spectre_v2_get_cpu_fw_mitigation_state();
++              if (state != SPECTRE_MITIGATED)
++                      break;
+               switch (arm_smccc_1_1_get_conduit()) {
+               case SMCCC_CONDUIT_HVC:
+-                      per_cpu(harden_branch_predictor_fn, cpu) =
+-                              call_hvc_arch_workaround_1;
+-                      cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+-                      spectre_v2_method = "hypervisor";
++                      method = SPECTRE_V2_METHOD_HVC;
+                       break;
+               case SMCCC_CONDUIT_SMC:
+-                      per_cpu(harden_branch_predictor_fn, cpu) =
+-                              call_smc_arch_workaround_1;
+-                      cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+-                      spectre_v2_method = "firmware";
++                      method = SPECTRE_V2_METHOD_SMC;
+                       break;
+               default:
++                      state = SPECTRE_VULNERABLE;
+                       break;
+               }
+       }
+-#endif
+-      }
+-      if (spectre_v2_method)
+-              pr_info("CPU%u: Spectre v2: using %s workaround\n",
+-                      smp_processor_id(), spectre_v2_method);
+-}
+-#else
+-static void cpu_v7_spectre_init(void)
+-{
++      if (state == SPECTRE_MITIGATED)
++              state = spectre_v2_install_workaround(method);
++
++      spectre_v2_update_state(state, method);
+ }
+-#endif
+ static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+                                                 u32 mask, const char *msg)
+@@ -142,16 +205,16 @@ static bool check_spectre_auxcr(bool *wa
+ void cpu_v7_ca8_ibe(void)
+ {
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
+-              cpu_v7_spectre_init();
++              cpu_v7_spectre_v2_init();
+ }
+ void cpu_v7_ca15_ibe(void)
+ {
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
+-              cpu_v7_spectre_init();
++              cpu_v7_spectre_v2_init();
+ }
+ void cpu_v7_bugs_init(void)
+ {
+-      cpu_v7_spectre_init();
++      cpu_v7_spectre_v2_init();
+ }
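
A hedged sketch (not from the patch) of the reporting interface this introduces: detection code records a state and a method bit with spectre_v2_update_state(), and cpu_show_spectre_v2() later maps the accumulated bitmask onto the strings above. The function name below is illustrative.

/*
 * Illustrative only: how per-CPU detection code is expected to use the
 * interface declared in the new arch/arm/include/asm/spectre.h.
 */
#include <asm/spectre.h>

static void example_report_bpiall_mitigation(void)
{
	/* After this, the spectre_v2 sysfs file reads
	 * "Mitigation: Branch predictor hardening". */
	spectre_v2_update_state(SPECTRE_MITIGATED, SPECTRE_V2_METHOD_BPIALL);
}
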
diff --git a/queue-5.15/arm-spectre-bhb-workaround.patch b/queue-5.15/arm-spectre-bhb-workaround.patch
new file mode 100644
index 0000000..1a209f8
--- /dev/null
+++ b/queue-5.15/arm-spectre-bhb-workaround.patch
@@ -0,0 +1,433 @@
+From foo@baz Tue Mar  8 08:32:37 PM CET 2022
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Thu, 10 Feb 2022 16:05:45 +0000
+Subject: ARM: Spectre-BHB workaround
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+commit b9baf5c8c5c356757f4f9d8180b5e9d234065bc3 upstream.
+
+Workaround the Spectre BHB issues for Cortex-A15, Cortex-A57,
+Cortex-A72, Cortex-A73 and Cortex-A75. We also include Brahma B15 as
+well to be safe, which is affected by Spectre V2 in the same ways as
+Cortex-A15.
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+[changes due to lack of SYSTEM_FREEING_INITMEM - gregkh]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/assembler.h   |   10 ++++
+ arch/arm/include/asm/spectre.h     |    4 +
+ arch/arm/include/asm/vmlinux.lds.h |   18 +++++++-
+ arch/arm/kernel/entry-armv.S       |   79 ++++++++++++++++++++++++++++++++++---
+ arch/arm/kernel/entry-common.S     |   24 +++++++++++
+ arch/arm/kernel/spectre.c          |    4 +
+ arch/arm/kernel/traps.c            |   38 +++++++++++++++++
+ arch/arm/mm/Kconfig                |   10 ++++
+ arch/arm/mm/proc-v7-bugs.c         |   76 +++++++++++++++++++++++++++++++++++
+ 9 files changed, 254 insertions(+), 9 deletions(-)
+
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -107,6 +107,16 @@
+       .endm
+ #endif
++#if __LINUX_ARM_ARCH__ < 7
++      .macro  dsb, args
++      mcr     p15, 0, r0, c7, c10, 4
++      .endm
++
++      .macro  isb, args
++      mcr     p15, 0, r0, c7, c5, 4
++      .endm
++#endif
++
+       .macro asm_trace_hardirqs_off, save=1
+ #if defined(CONFIG_TRACE_IRQFLAGS)
+       .if \save
+--- a/arch/arm/include/asm/spectre.h
++++ b/arch/arm/include/asm/spectre.h
+@@ -14,6 +14,7 @@ enum {
+       __SPECTRE_V2_METHOD_ICIALLU,
+       __SPECTRE_V2_METHOD_SMC,
+       __SPECTRE_V2_METHOD_HVC,
++      __SPECTRE_V2_METHOD_LOOP8,
+ };
+ enum {
+@@ -21,8 +22,11 @@ enum {
+       SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+       SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+       SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
++      SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+ };
+ void spectre_v2_update_state(unsigned int state, unsigned int methods);
++int spectre_bhb_update_vectors(unsigned int method);
++
+ #endif
+--- a/arch/arm/include/asm/vmlinux.lds.h
++++ b/arch/arm/include/asm/vmlinux.lds.h
+@@ -116,11 +116,23 @@
+  */
+ #define ARM_VECTORS                                                   \
+       __vectors_lma = .;                                              \
+-      .vectors 0xffff0000 : AT(__vectors_start) {                     \
+-              *(.vectors)                                             \
++      OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {            \
++              .vectors {                                              \
++                      *(.vectors)                                     \
++              }                                                       \
++              .vectors.bhb.loop8 {                                    \
++                      *(.vectors.bhb.loop8)                           \
++              }                                                       \
++              .vectors.bhb.bpiall {                                   \
++                      *(.vectors.bhb.bpiall)                          \
++              }                                                       \
+       }                                                               \
+       ARM_LMA(__vectors, .vectors);                                   \
+-      . = __vectors_lma + SIZEOF(.vectors);                           \
++      ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);               \
++      ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);             \
++      . = __vectors_lma + SIZEOF(.vectors) +                          \
++              SIZEOF(.vectors.bhb.loop8) +                            \
++              SIZEOF(.vectors.bhb.bpiall);                            \
+                                                                       \
+       __stubs_lma = .;                                                \
+       .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {              \
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1000,12 +1000,11 @@ vector_\name:
+       sub     lr, lr, #\correction
+       .endif
+-      @
+-      @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+-      @ (parent CPSR)
+-      @
++      @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}            @ save r0, lr
+-      mrs     lr, spsr
++
++      @ Save spsr_<exception> (parent CPSR)
++2:    mrs     lr, spsr
+       str     lr, [sp, #8]            @ save spsr
+       @
+@@ -1026,6 +1025,44 @@ vector_\name:
+       movs    pc, lr                  @ branch to handler in SVC mode
+ ENDPROC(vector_\name)
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .subsection 1
++      .align 5
++vector_bhb_loop8_\name:
++      .if \correction
++      sub     lr, lr, #\correction
++      .endif
++
++      @ Save r0, lr_<exception> (parent PC)
++      stmia   sp, {r0, lr}
++
++      @ bhb workaround
++      mov     r0, #8
++1:    b       . + 4
++      subs    r0, r0, #1
++      bne     1b
++      dsb
++      isb
++      b       2b
++ENDPROC(vector_bhb_loop8_\name)
++
++vector_bhb_bpiall_\name:
++      .if \correction
++      sub     lr, lr, #\correction
++      .endif
++
++      @ Save r0, lr_<exception> (parent PC)
++      stmia   sp, {r0, lr}
++
++      @ bhb workaround
++      mcr     p15, 0, r0, c7, c5, 6   @ BPIALL
++      @ isb not needed due to "movs pc, lr" in the vector stub
++      @ which gives a "context synchronisation".
++      b       2b
++ENDPROC(vector_bhb_bpiall_\name)
++      .previous
++#endif
++
+       .align  2
+       @ handler addresses follow this label
+ 1:
+@@ -1034,6 +1071,10 @@ ENDPROC(vector_\name)
+       .section .stubs, "ax", %progbits
+       @ This must be the first word
+       .word   vector_swi
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .word   vector_bhb_loop8_swi
++      .word   vector_bhb_bpiall_swi
++#endif
+ vector_rst:
+  ARM( swi     SYS_ERROR0      )
+@@ -1148,8 +1189,10 @@ vector_addrexcptn:
+  * FIQ "NMI" handler
+  *-----------------------------------------------------------------------------
+  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
+- * systems.
++ * systems. This must be the last vector stub, so lets place it in its own
++ * subsection.
+  */
++      .subsection 2
+       vector_stub     fiq, FIQ_MODE, 4
+       .long   __fiq_usr                       @  0  (USR_26 / USR_32)
+@@ -1182,6 +1225,30 @@ vector_addrexcptn:
+       W(b)    vector_irq
+       W(b)    vector_fiq
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .section .vectors.bhb.loop8, "ax", %progbits
++.L__vectors_bhb_loop8_start:
++      W(b)    vector_rst
++      W(b)    vector_bhb_loop8_und
++      W(ldr)  pc, .L__vectors_bhb_loop8_start + 0x1004
++      W(b)    vector_bhb_loop8_pabt
++      W(b)    vector_bhb_loop8_dabt
++      W(b)    vector_addrexcptn
++      W(b)    vector_bhb_loop8_irq
++      W(b)    vector_bhb_loop8_fiq
++
++      .section .vectors.bhb.bpiall, "ax", %progbits
++.L__vectors_bhb_bpiall_start:
++      W(b)    vector_rst
++      W(b)    vector_bhb_bpiall_und
++      W(ldr)  pc, .L__vectors_bhb_bpiall_start + 0x1008
++      W(b)    vector_bhb_bpiall_pabt
++      W(b)    vector_bhb_bpiall_dabt
++      W(b)    vector_addrexcptn
++      W(b)    vector_bhb_bpiall_irq
++      W(b)    vector_bhb_bpiall_fiq
++#endif
++
+       .data
+       .align  2
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -154,12 +154,36 @@ ENDPROC(ret_from_fork)
+  */
+       .align  5
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++ENTRY(vector_bhb_loop8_swi)
++      sub     sp, sp, #PT_REGS_SIZE
++      stmia   sp, {r0 - r12}
++      mov     r8, #8
++1:    b       2f
++2:    subs    r8, r8, #1
++      bne     1b
++      dsb
++      isb
++      b       3f
++ENDPROC(vector_bhb_loop8_swi)
++
++      .align  5
++ENTRY(vector_bhb_bpiall_swi)
++      sub     sp, sp, #PT_REGS_SIZE
++      stmia   sp, {r0 - r12}
++      mcr     p15, 0, r8, c7, c5, 6   @ BPIALL
++      isb
++      b       3f
++ENDPROC(vector_bhb_bpiall_swi)
++#endif
++      .align  5
+ ENTRY(vector_swi)
+ #ifdef CONFIG_CPU_V7M
+       v7m_exception_entry
+ #else
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}                  @ Calling r0 - r12
++3:
+  ARM( add     r8, sp, #S_PC           )
+  ARM( stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
+  THUMB(       mov     r8, sp                  )
+--- a/arch/arm/kernel/spectre.c
++++ b/arch/arm/kernel/spectre.c
+@@ -45,6 +45,10 @@ ssize_t cpu_show_spectre_v2(struct devic
+               method = "Firmware call";
+               break;
++      case SPECTRE_V2_METHOD_LOOP8:
++              method = "History overwrite";
++              break;
++
+       default:
+               method = "Multiple mitigations";
+               break;
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -30,6 +30,7 @@
+ #include <linux/atomic.h>
+ #include <asm/cacheflush.h>
+ #include <asm/exception.h>
++#include <asm/spectre.h>
+ #include <asm/unistd.h>
+ #include <asm/traps.h>
+ #include <asm/ptrace.h>
+@@ -801,6 +802,43 @@ static void flush_vectors(void *vma, siz
+       flush_icache_range(start, end);
+ }
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++int spectre_bhb_update_vectors(unsigned int method)
++{
++      extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
++      extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
++      void *vec_start, *vec_end;
++
++      if (system_state > SYSTEM_SCHEDULING) {
++              pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
++                     smp_processor_id());
++              return SPECTRE_VULNERABLE;
++      }
++
++      switch (method) {
++      case SPECTRE_V2_METHOD_LOOP8:
++              vec_start = __vectors_bhb_loop8_start;
++              vec_end = __vectors_bhb_loop8_end;
++              break;
++
++      case SPECTRE_V2_METHOD_BPIALL:
++              vec_start = __vectors_bhb_bpiall_start;
++              vec_end = __vectors_bhb_bpiall_end;
++              break;
++
++      default:
++              pr_err("CPU%u: unknown Spectre BHB state %d\n",
++                     smp_processor_id(), method);
++              return SPECTRE_VULNERABLE;
++      }
++
++      copy_from_lma(vectors_page, vec_start, vec_end);
++      flush_vectors(vectors_page, 0, vec_end - vec_start);
++
++      return SPECTRE_MITIGATED;
++}
++#endif
++
+ void __init early_trap_init(void *vectors_base)
+ {
+       extern char __stubs_start[], __stubs_end[];
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -851,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR
+          If unsure, say Y.
++config HARDEN_BRANCH_HISTORY
++      bool "Harden Spectre style attacks against branch history" if EXPERT
++      depends on CPU_SPECTRE
++      default y
++      help
++        Speculation attacks against some high-performance processors can
++        make use of branch history to influence future speculation. When
++        taking an exception, a sequence of branches overwrites the branch
++        history, or branch history is invalidated.
++
+ config TLS_REG_EMUL
+       bool
+       select NEED_KUSER_HELPERS
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -177,6 +177,81 @@ static void cpu_v7_spectre_v2_init(void)
+       spectre_v2_update_state(state, method);
+ }
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++static int spectre_bhb_method;
++
++static const char *spectre_bhb_method_name(int method)
++{
++      switch (method) {
++      case SPECTRE_V2_METHOD_LOOP8:
++              return "loop";
++
++      case SPECTRE_V2_METHOD_BPIALL:
++              return "BPIALL";
++
++      default:
++              return "unknown";
++      }
++}
++
++static int spectre_bhb_install_workaround(int method)
++{
++      if (spectre_bhb_method != method) {
++              if (spectre_bhb_method) {
++                      pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
++                             smp_processor_id());
++
++                      return SPECTRE_VULNERABLE;
++              }
++
++              if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
++                      return SPECTRE_VULNERABLE;
++
++              spectre_bhb_method = method;
++      }
++
++      pr_info("CPU%u: Spectre BHB: using %s workaround\n",
++              smp_processor_id(), spectre_bhb_method_name(method));
++
++      return SPECTRE_MITIGATED;
++}
++#else
++static int spectre_bhb_install_workaround(int method)
++{
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
++static void cpu_v7_spectre_bhb_init(void)
++{
++      unsigned int state, method = 0;
++
++      switch (read_cpuid_part()) {
++      case ARM_CPU_PART_CORTEX_A15:
++      case ARM_CPU_PART_BRAHMA_B15:
++      case ARM_CPU_PART_CORTEX_A57:
++      case ARM_CPU_PART_CORTEX_A72:
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_LOOP8;
++              break;
++
++      case ARM_CPU_PART_CORTEX_A73:
++      case ARM_CPU_PART_CORTEX_A75:
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_BPIALL;
++              break;
++
++      default:
++              state = SPECTRE_UNAFFECTED;
++              break;
++      }
++
++      if (state == SPECTRE_MITIGATED)
++              state = spectre_bhb_install_workaround(method);
++
++      spectre_v2_update_state(state, method);
++}
++
+ static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+                                                 u32 mask, const char *msg)
+ {
+@@ -217,4 +292,5 @@ void cpu_v7_ca15_ibe(void)
+ void cpu_v7_bugs_init(void)
+ {
+       cpu_v7_spectre_v2_init();
++      cpu_v7_spectre_bhb_init();
+ }
diff --git a/queue-5.15/arm-use-loadaddr-to-get-load-address-of-sections.patch b/queue-5.15/arm-use-loadaddr-to-get-load-address-of-sections.patch
new file mode 100644
index 0000000..8f760a8
--- /dev/null
+++ b/queue-5.15/arm-use-loadaddr-to-get-load-address-of-sections.patch
@@ -0,0 +1,60 @@
+From foo@baz Tue Mar  8 08:32:37 PM CET 2022
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Fri, 11 Feb 2022 19:49:50 +0000
+Subject: ARM: use LOADADDR() to get load address of sections
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+commit 8d9d651ff2270a632e9dc497b142db31e8911315 upstream.
+
+Use the linker's LOADADDR() macro to get the load address of the
+sections, and provide a macro to set the start and end symbols.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/vmlinux.lds.h |   19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+--- a/arch/arm/include/asm/vmlinux.lds.h
++++ b/arch/arm/include/asm/vmlinux.lds.h
+@@ -26,6 +26,11 @@
+ #define ARM_MMU_DISCARD(x)    x
+ #endif
++/* Set start/end symbol names to the LMA for the section */
++#define ARM_LMA(sym, section)                                         \
++      sym##_start = LOADADDR(section);                                \
++      sym##_end = LOADADDR(section) + SIZEOF(section)
++
+ #define PROC_INFO                                                     \
+               . = ALIGN(4);                                           \
+               __proc_info_begin = .;                                  \
+@@ -110,19 +115,19 @@
+  * only thing that matters is their relative offsets
+  */
+ #define ARM_VECTORS                                                   \
+-      __vectors_start = .;                                            \
++      __vectors_lma = .;                                              \
+       .vectors 0xffff0000 : AT(__vectors_start) {                     \
+               *(.vectors)                                             \
+       }                                                               \
+-      . = __vectors_start + SIZEOF(.vectors);                         \
+-      __vectors_end = .;                                              \
++      ARM_LMA(__vectors, .vectors);                                   \
++      . = __vectors_lma + SIZEOF(.vectors);                           \
+                                                                       \
+-      __stubs_start = .;                                              \
+-      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {            \
++      __stubs_lma = .;                                                \
++      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {              \
+               *(.stubs)                                               \
+       }                                                               \
+-      . = __stubs_start + SIZEOF(.stubs);                             \
+-      __stubs_end = .;                                                \
++      ARM_LMA(__stubs, .stubs);                                       \
++      . = __stubs_lma + SIZEOF(.stubs);                               \
+                                                                       \
+       PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
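
A hedged note on how the ARM_LMA() symbols are consumed (sketch only; the helper name is illustrative): C code sees each start/end pair as linker-provided char arrays, exactly as early_trap_init() does with __vectors_start/__vectors_end.

/* Illustrative only: the LMA start/end symbols emitted by ARM_LMA() are
 * referenced from C as extern char arrays, as in arch/arm/kernel/traps.c. */
extern char __vectors_start[], __vectors_end[];

static inline unsigned long example_vectors_copy_size(void)
{
	return (unsigned long)(__vectors_end - __vectors_start);
}
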
diff --git a/queue-5.15/series b/queue-5.15/series
index 332561de783630a2ab03640edf35e00937c8d11f..e3d58954937b87fbe6649cc97e78c5eaaeb8a903 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -7,3 +7,8 @@ x86-speculation-use-generic-retpoline-by-default-on-amd.patch
 x86-speculation-update-link-to-amd-speculation-whitepaper.patch
 x86-speculation-warn-about-spectre-v2-lfence-mitigation.patch
 x86-speculation-warn-about-eibrs-lfence-unprivileged-ebpf-smt.patch
+arm-report-spectre-v2-status-through-sysfs.patch
+arm-early-traps-initialisation.patch
+arm-use-loadaddr-to-get-load-address-of-sections.patch
+arm-spectre-bhb-workaround.patch
+arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch