5.4-stable patches
author:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 8 Mar 2022 19:36:58 +0000 (20:36 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 8 Mar 2022 19:36:58 +0000 (20:36 +0100)
added patches:
arm-arm64-provide-a-wrapper-for-smccc-1.1-calls.patch
arm-arm64-smccc-psci-add-arm_smccc_1_1_get_conduit.patch
arm-early-traps-initialisation.patch
arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch
arm-report-spectre-v2-status-through-sysfs.patch
arm-spectre-bhb-workaround.patch
arm-use-loadaddr-to-get-load-address-of-sections.patch

queue-5.4/arm-arm64-provide-a-wrapper-for-smccc-1.1-calls.patch [new file with mode: 0644]
queue-5.4/arm-arm64-smccc-psci-add-arm_smccc_1_1_get_conduit.patch [new file with mode: 0644]
queue-5.4/arm-early-traps-initialisation.patch [new file with mode: 0644]
queue-5.4/arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch [new file with mode: 0644]
queue-5.4/arm-report-spectre-v2-status-through-sysfs.patch [new file with mode: 0644]
queue-5.4/arm-spectre-bhb-workaround.patch [new file with mode: 0644]
queue-5.4/arm-use-loadaddr-to-get-load-address-of-sections.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/arm-arm64-provide-a-wrapper-for-smccc-1.1-calls.patch b/queue-5.4/arm-arm64-provide-a-wrapper-for-smccc-1.1-calls.patch
new file mode 100644 (file)
index 0000000..7429d11
--- /dev/null
@@ -0,0 +1,92 @@
+From foo@baz Tue Mar  8 08:34:57 PM CET 2022
+From: Steven Price <steven.price@arm.com>
+Date: Mon, 21 Oct 2019 16:28:21 +0100
+Subject: arm/arm64: Provide a wrapper for SMCCC 1.1 calls
+
+From: Steven Price <steven.price@arm.com>
+
+commit 541625ac47ce9d0835efaee0fcbaa251b0000a37 upstream.
+
+SMCCC 1.1 calls may use either HVC or SMC depending on the PSCI
+conduit. Rather than coding this in every call site, provide a macro
+which uses the correct instruction. The macro also handles the case
+where no conduit is configured or available, returning a not-supported
+error in res, along with the conduit used for the call.
+
+This allows us to remove some duplicated code and will be useful later
+when adding paravirtualized time hypervisor calls.
+
+Signed-off-by: Steven Price <steven.price@arm.com>
+Acked-by: Will Deacon <will@kernel.org>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |   58 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 58 insertions(+)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -304,5 +304,63 @@ asmlinkage void __arm_smccc_hvc(unsigned
+ #define SMCCC_RET_NOT_SUPPORTED                       -1
+ #define SMCCC_RET_NOT_REQUIRED                        -2
++/*
++ * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED.
++ * Used when the SMCCC conduit is not defined. The empty asm statement
++ * avoids compiler warnings about unused variables.
++ */
++#define __fail_smccc_1_1(...)                                         \
++      do {                                                            \
++              __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
++              asm ("" __constraints(__count_args(__VA_ARGS__)));      \
++              if (___res)                                             \
++                      ___res->a0 = SMCCC_RET_NOT_SUPPORTED;           \
++      } while (0)
++
++/*
++ * arm_smccc_1_1_invoke() - make an SMCCC v1.1 compliant call
++ *
++ * This is a variadic macro taking one to eight source arguments, and
++ * an optional return structure.
++ *
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This macro will make either an HVC call or an SMC call depending on the
++ * current SMCCC conduit. If no valid conduit is available then -1
++ * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
++ *
++ * The return value also provides the conduit that was used.
++ */
++#define arm_smccc_1_1_invoke(...) ({                                  \
++              int method = arm_smccc_1_1_get_conduit();               \
++              switch (method) {                                       \
++              case SMCCC_CONDUIT_HVC:                                 \
++                      arm_smccc_1_1_hvc(__VA_ARGS__);                 \
++                      break;                                          \
++              case SMCCC_CONDUIT_SMC:                                 \
++                      arm_smccc_1_1_smc(__VA_ARGS__);                 \
++                      break;                                          \
++              default:                                                \
++                      __fail_smccc_1_1(__VA_ARGS__);                  \
++                      method = SMCCC_CONDUIT_NONE;                    \
++                      break;                                          \
++              }                                                       \
++              method;                                                 \
++      })
++
++/* Paravirtualised time calls (defined by ARM DEN0057A) */
++#define ARM_SMCCC_HV_PV_TIME_FEATURES                         \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                 \
++                         ARM_SMCCC_SMC_64,                    \
++                         ARM_SMCCC_OWNER_STANDARD_HYP,        \
++                         0x20)
++
++#define ARM_SMCCC_HV_PV_TIME_ST                                       \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                 \
++                         ARM_SMCCC_SMC_64,                    \
++                         ARM_SMCCC_OWNER_STANDARD_HYP,        \
++                         0x21)
++
+ #endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
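
The wrapper makes call sites conduit-agnostic. A minimal sketch (not part of
the queued patch; the helper name is hypothetical) of how a caller might
probe for ARM_SMCCC_ARCH_WORKAROUND_1 through the new macro; the function
IDs and struct arm_smccc_res come from <linux/arm-smccc.h>:

    #include <linux/arm-smccc.h>

    /* Hypothetical caller: ask firmware whether it implements
     * ARM_SMCCC_ARCH_WORKAROUND_1. */
    static bool workaround_1_supported(void)
    {
            struct arm_smccc_res res;

            /* Dispatches via HVC or SMC as appropriate; if no conduit
             * is available, res.a0 is set to SMCCC_RET_NOT_SUPPORTED. */
            arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);

            return (int)res.a0 >= 0;
    }

This mirrors the probing sequence the proc-v7-bugs.c changes below perform
by hand with explicit conduit switches.
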
diff --git a/queue-5.4/arm-arm64-smccc-psci-add-arm_smccc_1_1_get_conduit.patch b/queue-5.4/arm-arm64-smccc-psci-add-arm_smccc_1_1_get_conduit.patch
new file mode 100644 (file)
index 0000000..abd15da
--- /dev/null
@@ -0,0 +1,79 @@
+From foo@baz Tue Mar  8 08:34:57 PM CET 2022
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 9 Aug 2019 14:22:40 +0100
+Subject: arm/arm64: smccc/psci: add arm_smccc_1_1_get_conduit()
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 6b7fe77c334ae59fed9500140e08f4f896b36871 upstream.
+
+SMCCC callers are currently amassing a collection of enums for the SMCCC
+conduit, and are having to dig into the PSCI driver's internals in order
+to figure out what to do.
+
+Let's clean this up, with common SMCCC_CONDUIT_* definitions, and an
+arm_smccc_1_1_get_conduit() helper that abstracts the PSCI driver's
+internal state.
+
+We can kill off the PSCI_CONDUIT_* definitions once we've migrated users
+over to the new interface.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/psci/psci.c |   15 +++++++++++++++
+ include/linux/arm-smccc.h    |   16 ++++++++++++++++
+ 2 files changed, 31 insertions(+)
+
+--- a/drivers/firmware/psci/psci.c
++++ b/drivers/firmware/psci/psci.c
+@@ -57,6 +57,21 @@ struct psci_operations psci_ops = {
+       .smccc_version = SMCCC_VERSION_1_0,
+ };
++enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
++{
++      if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
++              return SMCCC_CONDUIT_NONE;
++
++      switch (psci_ops.conduit) {
++      case PSCI_CONDUIT_SMC:
++              return SMCCC_CONDUIT_SMC;
++      case PSCI_CONDUIT_HVC:
++              return SMCCC_CONDUIT_HVC;
++      default:
++              return SMCCC_CONDUIT_NONE;
++      }
++}
++
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+                               unsigned long, unsigned long);
+ static psci_fn *invoke_psci_fn;
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -82,6 +82,22 @@
+ #include <linux/linkage.h>
+ #include <linux/types.h>
++
++enum arm_smccc_conduit {
++      SMCCC_CONDUIT_NONE,
++      SMCCC_CONDUIT_SMC,
++      SMCCC_CONDUIT_HVC,
++};
++
++/**
++ * arm_smccc_1_1_get_conduit()
++ *
++ * Returns the conduit to be used for SMCCCv1.1 or later.
++ *
++ * When SMCCCv1.1 is not present, returns SMCCC_CONDUIT_NONE.
++ */
++enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void);
++
+ /**
+  * struct arm_smccc_res - Result from SMC/HVC call
+  * @a0-a3 result values from registers 0 to 3
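
A minimal usage sketch (illustrative only; the helper name is hypothetical):
call sites that previously dug into psci_ops can now test the conduit
directly:

    #include <linux/arm-smccc.h>

    /* True when an SMCCC v1.1 conduit exists. SMCCC_CONDUIT_NONE covers
     * both "no PSCI" and "firmware only implements SMCCC v1.0". */
    static bool have_smccc_1_1(void)
    {
            return arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
    }
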
diff --git a/queue-5.4/arm-early-traps-initialisation.patch b/queue-5.4/arm-early-traps-initialisation.patch
new file mode 100644 (file)
index 0000000..cdfdfe5
--- /dev/null
@@ -0,0 +1,71 @@
+From foo@baz Tue Mar  8 08:34:57 PM CET 2022
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Fri, 11 Feb 2022 19:46:15 +0000
+Subject: ARM: early traps initialisation
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+commit 04e91b7324760a377a725e218b5ee783826d30f5 upstream.
+
+Provide a couple of helpers to copy the vectors and stubs, and also
+to flush the copied vectors and stubs.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/traps.c |   27 +++++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 6 deletions(-)
+
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -799,10 +799,22 @@ static inline void __init kuser_init(voi
+ }
+ #endif
++#ifndef CONFIG_CPU_V7M
++static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
++{
++      memcpy(vma, lma_start, lma_end - lma_start);
++}
++
++static void flush_vectors(void *vma, size_t offset, size_t size)
++{
++      unsigned long start = (unsigned long)vma + offset;
++      unsigned long end = start + size;
++
++      flush_icache_range(start, end);
++}
++
+ void __init early_trap_init(void *vectors_base)
+ {
+-#ifndef CONFIG_CPU_V7M
+-      unsigned long vectors = (unsigned long)vectors_base;
+       extern char __stubs_start[], __stubs_end[];
+       extern char __vectors_start[], __vectors_end[];
+       unsigned i;
+@@ -823,17 +835,20 @@ void __init early_trap_init(void *vector
+        * into the vector page, mapped at 0xffff0000, and ensure these
+        * are visible to the instruction stream.
+        */
+-      memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+-      memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
++      copy_from_lma(vectors_base, __vectors_start, __vectors_end);
++      copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
+       kuser_init(vectors_base);
+-      flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
++      flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
++}
+ #else /* ifndef CONFIG_CPU_V7M */
++void __init early_trap_init(void *vectors_base)
++{
+       /*
+        * on V7-M there is no need to copy the vector table to a dedicated
+        * memory area. The address is configurable and so a table in the kernel
+        * image can be used.
+        */
+-#endif
+ }
++#endif
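
The split gives early_trap_init() a plain copy step and an explicit
cache-maintenance step. A short sketch of the helper contract, mirroring the
call sequence in the hunk above (not new code in the patch):

    /* vectors_base: runtime (VMA) address of the vector page;
     * the __vectors_* symbols bound the section in the load image. */
    extern char __vectors_start[], __vectors_end[];

    static void example_install(void *vectors_base)
    {
            copy_from_lma(vectors_base, __vectors_start, __vectors_end);
            /* Make the copied instructions visible to the I-side. */
            flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
    }
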
diff --git a/queue-5.4/arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch b/queue-5.4/arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch
new file mode 100644 (file)
index 0000000..0c7f2c7
--- /dev/null
@@ -0,0 +1,54 @@
+From 25875aa71dfefd1959f07e626c4d285b88b27ac2 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Mon, 7 Mar 2022 19:28:32 +0000
+Subject: ARM: include unprivileged BPF status in Spectre V2 reporting
+
+From: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+
+commit 25875aa71dfefd1959f07e626c4d285b88b27ac2 upstream.
+
+The mitigations for Spectre-BHB are only applied when an exception
+is taken, but when unprivileged BPF is enabled, userspace can
+load BPF programs that can be used to exploit the problem.
+
+When unprivileged BPF is enabled, report the vulnerable status via
+the spectre_v2 sysfs file.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/spectre.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/arm/kernel/spectre.c
++++ b/arch/arm/kernel/spectre.c
+@@ -1,9 +1,19 @@
+ // SPDX-License-Identifier: GPL-2.0-only
++#include <linux/bpf.h>
+ #include <linux/cpu.h>
+ #include <linux/device.h>
+ #include <asm/spectre.h>
++static bool _unprivileged_ebpf_enabled(void)
++{
++#ifdef CONFIG_BPF_SYSCALL
++      return !sysctl_unprivileged_bpf_disabled;
++#else
++      return false;
++#endif
++}
++
+ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+ {
+@@ -31,6 +41,9 @@ ssize_t cpu_show_spectre_v2(struct devic
+       if (spectre_v2_state != SPECTRE_MITIGATED)
+               return sprintf(buf, "%s\n", "Vulnerable");
++      if (_unprivileged_ebpf_enabled())
++              return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
++
+       switch (spectre_v2_methods) {
+       case SPECTRE_V2_METHOD_BPIALL:
+               method = "Branch predictor hardening";
diff --git a/queue-5.4/arm-report-spectre-v2-status-through-sysfs.patch b/queue-5.4/arm-report-spectre-v2-status-through-sysfs.patch
new file mode 100644 (file)
index 0000000..0928a6c
--- /dev/null
@@ -0,0 +1,348 @@
+From foo@baz Tue Mar  8 08:34:57 PM CET 2022
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Fri, 11 Feb 2022 16:45:54 +0000
+Subject: ARM: report Spectre v2 status through sysfs
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+commit 9dd78194a3722fa6712192cdd4f7032d45112a9a upstream.
+
+As per other architectures, add support for reporting the Spectre
+vulnerability status via sysfs CPU.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+[ preserve res variable - gregkh ]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/spectre.h |   28 ++++++++
+ arch/arm/kernel/Makefile       |    2 
+ arch/arm/kernel/spectre.c      |   54 ++++++++++++++++
+ arch/arm/mm/Kconfig            |    1 
+ arch/arm/mm/proc-v7-bugs.c     |  131 +++++++++++++++++++++++++++++++----------
+ 5 files changed, 185 insertions(+), 31 deletions(-)
+ create mode 100644 arch/arm/include/asm/spectre.h
+ create mode 100644 arch/arm/kernel/spectre.c
+
+--- /dev/null
++++ b/arch/arm/include/asm/spectre.h
+@@ -0,0 +1,28 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef __ASM_SPECTRE_H
++#define __ASM_SPECTRE_H
++
++enum {
++      SPECTRE_UNAFFECTED,
++      SPECTRE_MITIGATED,
++      SPECTRE_VULNERABLE,
++};
++
++enum {
++      __SPECTRE_V2_METHOD_BPIALL,
++      __SPECTRE_V2_METHOD_ICIALLU,
++      __SPECTRE_V2_METHOD_SMC,
++      __SPECTRE_V2_METHOD_HVC,
++};
++
++enum {
++      SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
++      SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
++      SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
++      SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
++};
++
++void spectre_v2_update_state(unsigned int state, unsigned int methods);
++
++#endif
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -106,4 +106,6 @@ endif
+ obj-$(CONFIG_HAVE_ARM_SMCCC)  += smccc-call.o
++obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
++
+ extra-y := $(head-y) vmlinux.lds
+--- /dev/null
++++ b/arch/arm/kernel/spectre.c
+@@ -0,0 +1,54 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#include <linux/cpu.h>
++#include <linux/device.h>
++
++#include <asm/spectre.h>
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++static unsigned int spectre_v2_state;
++static unsigned int spectre_v2_methods;
++
++void spectre_v2_update_state(unsigned int state, unsigned int method)
++{
++      if (state > spectre_v2_state)
++              spectre_v2_state = state;
++      spectre_v2_methods |= method;
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      const char *method;
++
++      if (spectre_v2_state == SPECTRE_UNAFFECTED)
++              return sprintf(buf, "%s\n", "Not affected");
++
++      if (spectre_v2_state != SPECTRE_MITIGATED)
++              return sprintf(buf, "%s\n", "Vulnerable");
++
++      switch (spectre_v2_methods) {
++      case SPECTRE_V2_METHOD_BPIALL:
++              method = "Branch predictor hardening";
++              break;
++
++      case SPECTRE_V2_METHOD_ICIALLU:
++              method = "I-cache invalidation";
++              break;
++
++      case SPECTRE_V2_METHOD_SMC:
++      case SPECTRE_V2_METHOD_HVC:
++              method = "Firmware call";
++              break;
++
++      default:
++              method = "Multiple mitigations";
++              break;
++      }
++
++      return sprintf(buf, "Mitigation: %s\n", method);
++}
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -833,6 +833,7 @@ config CPU_BPREDICT_DISABLE
+ config CPU_SPECTRE
+       bool
++      select GENERIC_CPU_VULNERABILITIES
+ config HARDEN_BRANCH_PREDICTOR
+       bool "Harden the branch predictor against aliasing attacks" if EXPERT
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -7,8 +7,35 @@
+ #include <asm/cp15.h>
+ #include <asm/cputype.h>
+ #include <asm/proc-fns.h>
++#include <asm/spectre.h>
+ #include <asm/system_misc.h>
++#ifdef CONFIG_ARM_PSCI
++static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
++{
++      struct arm_smccc_res res;
++
++      arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                           ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++
++      switch ((int)res.a0) {
++      case SMCCC_RET_SUCCESS:
++              return SPECTRE_MITIGATED;
++
++      case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
++              return SPECTRE_UNAFFECTED;
++
++      default:
++              return SPECTRE_VULNERABLE;
++      }
++}
++#else
++static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
++{
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+@@ -37,13 +64,60 @@ static void __maybe_unused call_hvc_arch
+       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+ }
+-static void cpu_v7_spectre_init(void)
++static unsigned int spectre_v2_install_workaround(unsigned int method)
+ {
+       const char *spectre_v2_method = NULL;
+       int cpu = smp_processor_id();
+       if (per_cpu(harden_branch_predictor_fn, cpu))
+-              return;
++              return SPECTRE_MITIGATED;
++
++      switch (method) {
++      case SPECTRE_V2_METHOD_BPIALL:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_bpiall;
++              spectre_v2_method = "BPIALL";
++              break;
++
++      case SPECTRE_V2_METHOD_ICIALLU:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_iciallu;
++              spectre_v2_method = "ICIALLU";
++              break;
++
++      case SPECTRE_V2_METHOD_HVC:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      call_hvc_arch_workaround_1;
++              cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
++              spectre_v2_method = "hypervisor";
++              break;
++
++      case SPECTRE_V2_METHOD_SMC:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      call_smc_arch_workaround_1;
++              cpu_do_switch_mm = cpu_v7_smc_switch_mm;
++              spectre_v2_method = "firmware";
++              break;
++      }
++
++      if (spectre_v2_method)
++              pr_info("CPU%u: Spectre v2: using %s workaround\n",
++                      smp_processor_id(), spectre_v2_method);
++
++      return SPECTRE_MITIGATED;
++}
++#else
++static unsigned int spectre_v2_install_workaround(unsigned int method)
++{
++      pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n");
++
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
++static void cpu_v7_spectre_v2_init(void)
++{
++      unsigned int state, method = 0;
+       switch (read_cpuid_part()) {
+       case ARM_CPU_PART_CORTEX_A8:
+@@ -52,32 +126,37 @@ static void cpu_v7_spectre_init(void)
+       case ARM_CPU_PART_CORTEX_A17:
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
+-              per_cpu(harden_branch_predictor_fn, cpu) =
+-                      harden_branch_predictor_bpiall;
+-              spectre_v2_method = "BPIALL";
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_BPIALL;
+               break;
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
+-              per_cpu(harden_branch_predictor_fn, cpu) =
+-                      harden_branch_predictor_iciallu;
+-              spectre_v2_method = "ICIALLU";
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_ICIALLU;
+               break;
+-#ifdef CONFIG_ARM_PSCI
+       case ARM_CPU_PART_BRAHMA_B53:
+               /* Requires no workaround */
++              state = SPECTRE_UNAFFECTED;
+               break;
++
+       default:
+               /* Other ARM CPUs require no workaround */
+-              if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
++              if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
++                      state = SPECTRE_UNAFFECTED;
+                       break;
++              }
+               /* fallthrough */
+-              /* Cortex A57/A72 require firmware workaround */
++      /* Cortex A57/A72 require firmware workaround */
+       case ARM_CPU_PART_CORTEX_A57:
+       case ARM_CPU_PART_CORTEX_A72: {
+               struct arm_smccc_res res;
++              state = spectre_v2_get_cpu_fw_mitigation_state();
++              if (state != SPECTRE_MITIGATED)
++                      break;
++
+               if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+                       break;
+@@ -87,10 +166,7 @@ static void cpu_v7_spectre_init(void)
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
+-                      per_cpu(harden_branch_predictor_fn, cpu) =
+-                              call_hvc_arch_workaround_1;
+-                      cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+-                      spectre_v2_method = "hypervisor";
++                      method = SPECTRE_V2_METHOD_HVC;
+                       break;
+               case PSCI_CONDUIT_SMC:
+@@ -98,28 +174,21 @@ static void cpu_v7_spectre_init(void)
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
+-                      per_cpu(harden_branch_predictor_fn, cpu) =
+-                              call_smc_arch_workaround_1;
+-                      cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+-                      spectre_v2_method = "firmware";
++                      method = SPECTRE_V2_METHOD_SMC;
+                       break;
+               default:
++                      state = SPECTRE_VULNERABLE;
+                       break;
+               }
+       }
+-#endif
+       }
+-      if (spectre_v2_method)
+-              pr_info("CPU%u: Spectre v2: using %s workaround\n",
+-                      smp_processor_id(), spectre_v2_method);
+-}
+-#else
+-static void cpu_v7_spectre_init(void)
+-{
++      if (state == SPECTRE_MITIGATED)
++              state = spectre_v2_install_workaround(method);
++
++      spectre_v2_update_state(state, method);
+ }
+-#endif
+ static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+                                                 u32 mask, const char *msg)
+@@ -149,16 +218,16 @@ static bool check_spectre_auxcr(bool *wa
+ void cpu_v7_ca8_ibe(void)
+ {
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
+-              cpu_v7_spectre_init();
++              cpu_v7_spectre_v2_init();
+ }
+ void cpu_v7_ca15_ibe(void)
+ {
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
+-              cpu_v7_spectre_init();
++              cpu_v7_spectre_v2_init();
+ }
+ void cpu_v7_bugs_init(void)
+ {
+-      cpu_v7_spectre_init();
++      cpu_v7_spectre_v2_init();
+ }
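
spectre_v2_update_state() aggregates per-CPU results: the state value only
ratchets toward SPECTRE_VULNERABLE, and the methods are OR-ed into a
bitmask, so a heterogeneous system falls through to the "Multiple
mitigations" label. An illustrative (hypothetical) sequence:

    #include <asm/spectre.h>

    /* Two CPU types reporting different mitigation methods. */
    static void example_mixed_report(void)
    {
            spectre_v2_update_state(SPECTRE_MITIGATED, SPECTRE_V2_METHOD_BPIALL);
            spectre_v2_update_state(SPECTRE_MITIGATED, SPECTRE_V2_METHOD_ICIALLU);
            /* spectre_v2_methods now has two bits set, so
             * cpu_show_spectre_v2() prints
             * "Mitigation: Multiple mitigations". */
    }
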
diff --git a/queue-5.4/arm-spectre-bhb-workaround.patch b/queue-5.4/arm-spectre-bhb-workaround.patch
new file mode 100644 (file)
index 0000000..f6a3a59
--- /dev/null
@@ -0,0 +1,433 @@
+From foo@baz Tue Mar  8 08:34:57 PM CET 2022
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Thu, 10 Feb 2022 16:05:45 +0000
+Subject: ARM: Spectre-BHB workaround
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+commit b9baf5c8c5c356757f4f9d8180b5e9d234065bc3 upstream.
+
+Workaround the Spectre BHB issues for Cortex-A15, Cortex-A57,
+Cortex-A72, Cortex-A73 and Cortex-A75. We also include Brahma B15 to be
+safe, as it is affected by Spectre V2 in the same way as Cortex-A15.
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+[changes due to lack of SYSTEM_FREEING_INITMEM - gregkh]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/assembler.h |   10 ++++
+ arch/arm/include/asm/spectre.h   |    4 +
+ arch/arm/kernel/entry-armv.S     |   79 ++++++++++++++++++++++++++++++++++++---
+ arch/arm/kernel/entry-common.S   |   24 +++++++++++
+ arch/arm/kernel/spectre.c        |    4 +
+ arch/arm/kernel/traps.c          |   38 ++++++++++++++++++
+ arch/arm/kernel/vmlinux.lds.h    |   18 +++++++-
+ arch/arm/mm/Kconfig              |   10 ++++
+ arch/arm/mm/proc-v7-bugs.c       |   76 +++++++++++++++++++++++++++++++++++++
+ 9 files changed, 254 insertions(+), 9 deletions(-)
+
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -107,6 +107,16 @@
+       .endm
+ #endif
++#if __LINUX_ARM_ARCH__ < 7
++      .macro  dsb, args
++      mcr     p15, 0, r0, c7, c10, 4
++      .endm
++
++      .macro  isb, args
++      mcr     p15, 0, r0, c7, c5, 4
++      .endm
++#endif
++
+       .macro asm_trace_hardirqs_off, save=1
+ #if defined(CONFIG_TRACE_IRQFLAGS)
+       .if \save
+--- a/arch/arm/include/asm/spectre.h
++++ b/arch/arm/include/asm/spectre.h
+@@ -14,6 +14,7 @@ enum {
+       __SPECTRE_V2_METHOD_ICIALLU,
+       __SPECTRE_V2_METHOD_SMC,
+       __SPECTRE_V2_METHOD_HVC,
++      __SPECTRE_V2_METHOD_LOOP8,
+ };
+ enum {
+@@ -21,8 +22,11 @@ enum {
+       SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+       SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+       SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
++      SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+ };
+ void spectre_v2_update_state(unsigned int state, unsigned int methods);
++int spectre_bhb_update_vectors(unsigned int method);
++
+ #endif
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1005,12 +1005,11 @@ vector_\name:
+       sub     lr, lr, #\correction
+       .endif
+-      @
+-      @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+-      @ (parent CPSR)
+-      @
++      @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}            @ save r0, lr
+-      mrs     lr, spsr
++
++      @ Save spsr_<exception> (parent CPSR)
++2:    mrs     lr, spsr
+       str     lr, [sp, #8]            @ save spsr
+       @
+@@ -1031,6 +1030,44 @@ vector_\name:
+       movs    pc, lr                  @ branch to handler in SVC mode
+ ENDPROC(vector_\name)
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .subsection 1
++      .align 5
++vector_bhb_loop8_\name:
++      .if \correction
++      sub     lr, lr, #\correction
++      .endif
++
++      @ Save r0, lr_<exception> (parent PC)
++      stmia   sp, {r0, lr}
++
++      @ bhb workaround
++      mov     r0, #8
++1:    b       . + 4
++      subs    r0, r0, #1
++      bne     1b
++      dsb
++      isb
++      b       2b
++ENDPROC(vector_bhb_loop8_\name)
++
++vector_bhb_bpiall_\name:
++      .if \correction
++      sub     lr, lr, #\correction
++      .endif
++
++      @ Save r0, lr_<exception> (parent PC)
++      stmia   sp, {r0, lr}
++
++      @ bhb workaround
++      mcr     p15, 0, r0, c7, c5, 6   @ BPIALL
++      @ isb not needed due to "movs pc, lr" in the vector stub
++      @ which gives a "context synchronisation".
++      b       2b
++ENDPROC(vector_bhb_bpiall_\name)
++      .previous
++#endif
++
+       .align  2
+       @ handler addresses follow this label
+ 1:
+@@ -1039,6 +1076,10 @@ ENDPROC(vector_\name)
+       .section .stubs, "ax", %progbits
+       @ This must be the first word
+       .word   vector_swi
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .word   vector_bhb_loop8_swi
++      .word   vector_bhb_bpiall_swi
++#endif
+ vector_rst:
+  ARM( swi     SYS_ERROR0      )
+@@ -1153,8 +1194,10 @@ vector_addrexcptn:
+  * FIQ "NMI" handler
+  *-----------------------------------------------------------------------------
+  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
+- * systems.
++ * systems. This must be the last vector stub, so let's place it in its own
++ * subsection.
+  */
++      .subsection 2
+       vector_stub     fiq, FIQ_MODE, 4
+       .long   __fiq_usr                       @  0  (USR_26 / USR_32)
+@@ -1187,6 +1230,30 @@ vector_addrexcptn:
+       W(b)    vector_irq
+       W(b)    vector_fiq
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .section .vectors.bhb.loop8, "ax", %progbits
++.L__vectors_bhb_loop8_start:
++      W(b)    vector_rst
++      W(b)    vector_bhb_loop8_und
++      W(ldr)  pc, .L__vectors_bhb_loop8_start + 0x1004
++      W(b)    vector_bhb_loop8_pabt
++      W(b)    vector_bhb_loop8_dabt
++      W(b)    vector_addrexcptn
++      W(b)    vector_bhb_loop8_irq
++      W(b)    vector_bhb_loop8_fiq
++
++      .section .vectors.bhb.bpiall, "ax", %progbits
++.L__vectors_bhb_bpiall_start:
++      W(b)    vector_rst
++      W(b)    vector_bhb_bpiall_und
++      W(ldr)  pc, .L__vectors_bhb_bpiall_start + 0x1008
++      W(b)    vector_bhb_bpiall_pabt
++      W(b)    vector_bhb_bpiall_dabt
++      W(b)    vector_addrexcptn
++      W(b)    vector_bhb_bpiall_irq
++      W(b)    vector_bhb_bpiall_fiq
++#endif
++
+       .data
+       .align  2
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -163,12 +163,36 @@ ENDPROC(ret_from_fork)
+  */
+       .align  5
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++ENTRY(vector_bhb_loop8_swi)
++      sub     sp, sp, #PT_REGS_SIZE
++      stmia   sp, {r0 - r12}
++      mov     r8, #8
++1:    b       2f
++2:    subs    r8, r8, #1
++      bne     1b
++      dsb
++      isb
++      b       3f
++ENDPROC(vector_bhb_loop8_swi)
++
++      .align  5
++ENTRY(vector_bhb_bpiall_swi)
++      sub     sp, sp, #PT_REGS_SIZE
++      stmia   sp, {r0 - r12}
++      mcr     p15, 0, r8, c7, c5, 6   @ BPIALL
++      isb
++      b       3f
++ENDPROC(vector_bhb_bpiall_swi)
++#endif
++      .align  5
+ ENTRY(vector_swi)
+ #ifdef CONFIG_CPU_V7M
+       v7m_exception_entry
+ #else
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}                  @ Calling r0 - r12
++3:
+  ARM( add     r8, sp, #S_PC           )
+  ARM( stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
+  THUMB(       mov     r8, sp                  )
+--- a/arch/arm/kernel/spectre.c
++++ b/arch/arm/kernel/spectre.c
+@@ -45,6 +45,10 @@ ssize_t cpu_show_spectre_v2(struct devic
+               method = "Firmware call";
+               break;
++      case SPECTRE_V2_METHOD_LOOP8:
++              method = "History overwrite";
++              break;
++
+       default:
+               method = "Multiple mitigations";
+               break;
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -30,6 +30,7 @@
+ #include <linux/atomic.h>
+ #include <asm/cacheflush.h>
+ #include <asm/exception.h>
++#include <asm/spectre.h>
+ #include <asm/unistd.h>
+ #include <asm/traps.h>
+ #include <asm/ptrace.h>
+@@ -813,6 +814,43 @@ static void flush_vectors(void *vma, siz
+       flush_icache_range(start, end);
+ }
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++int spectre_bhb_update_vectors(unsigned int method)
++{
++      extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
++      extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
++      void *vec_start, *vec_end;
++
++      if (system_state > SYSTEM_SCHEDULING) {
++              pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
++                     smp_processor_id());
++              return SPECTRE_VULNERABLE;
++      }
++
++      switch (method) {
++      case SPECTRE_V2_METHOD_LOOP8:
++              vec_start = __vectors_bhb_loop8_start;
++              vec_end = __vectors_bhb_loop8_end;
++              break;
++
++      case SPECTRE_V2_METHOD_BPIALL:
++              vec_start = __vectors_bhb_bpiall_start;
++              vec_end = __vectors_bhb_bpiall_end;
++              break;
++
++      default:
++              pr_err("CPU%u: unknown Spectre BHB state %d\n",
++                     smp_processor_id(), method);
++              return SPECTRE_VULNERABLE;
++      }
++
++      copy_from_lma(vectors_page, vec_start, vec_end);
++      flush_vectors(vectors_page, 0, vec_end - vec_start);
++
++      return SPECTRE_MITIGATED;
++}
++#endif
++
+ void __init early_trap_init(void *vectors_base)
+ {
+       extern char __stubs_start[], __stubs_end[];
+--- a/arch/arm/kernel/vmlinux.lds.h
++++ b/arch/arm/kernel/vmlinux.lds.h
+@@ -106,11 +106,23 @@
+  */
+ #define ARM_VECTORS                                                   \
+       __vectors_lma = .;                                              \
+-      .vectors 0xffff0000 : AT(__vectors_start) {                     \
+-              *(.vectors)                                             \
++      OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {            \
++              .vectors {                                              \
++                      *(.vectors)                                     \
++              }                                                       \
++              .vectors.bhb.loop8 {                                    \
++                      *(.vectors.bhb.loop8)                           \
++              }                                                       \
++              .vectors.bhb.bpiall {                                   \
++                      *(.vectors.bhb.bpiall)                          \
++              }                                                       \
+       }                                                               \
+       ARM_LMA(__vectors, .vectors);                                   \
+-      . = __vectors_lma + SIZEOF(.vectors);                           \
++      ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);               \
++      ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);             \
++      . = __vectors_lma + SIZEOF(.vectors) +                          \
++              SIZEOF(.vectors.bhb.loop8) +                            \
++              SIZEOF(.vectors.bhb.bpiall);                            \
+                                                                       \
+       __stubs_lma = .;                                                \
+       .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {              \
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -854,6 +854,16 @@ config HARDEN_BRANCH_PREDICTOR
+          If unsure, say Y.
++config HARDEN_BRANCH_HISTORY
++      bool "Harden Spectre style attacks against branch history" if EXPERT
++      depends on CPU_SPECTRE
++      default y
++      help
++        Speculation attacks against some high-performance processors can
++        make use of branch history to influence future speculation. When
++        taking an exception, a sequence of branches overwrites the branch
++        history, or branch history is invalidated.
++
+ config TLS_REG_EMUL
+       bool
+       select NEED_KUSER_HELPERS
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -190,6 +190,81 @@ static void cpu_v7_spectre_v2_init(void)
+       spectre_v2_update_state(state, method);
+ }
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++static int spectre_bhb_method;
++
++static const char *spectre_bhb_method_name(int method)
++{
++      switch (method) {
++      case SPECTRE_V2_METHOD_LOOP8:
++              return "loop";
++
++      case SPECTRE_V2_METHOD_BPIALL:
++              return "BPIALL";
++
++      default:
++              return "unknown";
++      }
++}
++
++static int spectre_bhb_install_workaround(int method)
++{
++      if (spectre_bhb_method != method) {
++              if (spectre_bhb_method) {
++                      pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
++                             smp_processor_id());
++
++                      return SPECTRE_VULNERABLE;
++              }
++
++              if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
++                      return SPECTRE_VULNERABLE;
++
++              spectre_bhb_method = method;
++      }
++
++      pr_info("CPU%u: Spectre BHB: using %s workaround\n",
++              smp_processor_id(), spectre_bhb_method_name(method));
++
++      return SPECTRE_MITIGATED;
++}
++#else
++static int spectre_bhb_install_workaround(int method)
++{
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
++static void cpu_v7_spectre_bhb_init(void)
++{
++      unsigned int state, method = 0;
++
++      switch (read_cpuid_part()) {
++      case ARM_CPU_PART_CORTEX_A15:
++      case ARM_CPU_PART_BRAHMA_B15:
++      case ARM_CPU_PART_CORTEX_A57:
++      case ARM_CPU_PART_CORTEX_A72:
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_LOOP8;
++              break;
++
++      case ARM_CPU_PART_CORTEX_A73:
++      case ARM_CPU_PART_CORTEX_A75:
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_BPIALL;
++              break;
++
++      default:
++              state = SPECTRE_UNAFFECTED;
++              break;
++      }
++
++      if (state == SPECTRE_MITIGATED)
++              state = spectre_bhb_install_workaround(method);
++
++      spectre_v2_update_state(state, method);
++}
++
+ static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+                                                 u32 mask, const char *msg)
+ {
+@@ -230,4 +305,5 @@ void cpu_v7_ca15_ibe(void)
+ void cpu_v7_bugs_init(void)
+ {
+       cpu_v7_spectre_v2_init();
++      cpu_v7_spectre_bhb_init();
+ }
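
cpu_v7_spectre_bhb_init() runs per CPU, so on a big.LITTLE system two
different methods can be requested; because only one vector page can be
installed, spectre_bhb_install_workaround() rejects the second request. A
condensed, illustrative sketch of that behaviour (the function is static to
proc-v7-bugs.c):

    #include <asm/spectre.h>

    /* Hypothetical heterogeneous boot sequence. */
    static void example_method_disagreement(void)
    {
            /* The first CPU type installs the loop8 vectors. */
            spectre_bhb_install_workaround(SPECTRE_V2_METHOD_LOOP8);

            /* A later CPU type asking for BPIALL takes the "method
             * disagreement" path and returns SPECTRE_VULNERABLE. */
            spectre_bhb_install_workaround(SPECTRE_V2_METHOD_BPIALL);
    }
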
diff --git a/queue-5.4/arm-use-loadaddr-to-get-load-address-of-sections.patch b/queue-5.4/arm-use-loadaddr-to-get-load-address-of-sections.patch
new file mode 100644 (file)
index 0000000..d3f67a7
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Tue Mar  8 08:34:57 PM CET 2022
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Fri, 11 Feb 2022 19:49:50 +0000
+Subject: ARM: use LOADADDR() to get load address of sections
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+commit 8d9d651ff2270a632e9dc497b142db31e8911315 upstream.
+
+Use the linker's LOADADDR() macro to get the load address of the
+sections, and provide a macro to set the start and end symbols.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/vmlinux.lds.h |   19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+--- a/arch/arm/kernel/vmlinux.lds.h
++++ b/arch/arm/kernel/vmlinux.lds.h
+@@ -25,6 +25,11 @@
+ #define ARM_MMU_DISCARD(x)    x
+ #endif
++/* Set start/end symbol names to the LMA for the section */
++#define ARM_LMA(sym, section)                                         \
++      sym##_start = LOADADDR(section);                                \
++      sym##_end = LOADADDR(section) + SIZEOF(section)
++
+ #define PROC_INFO                                                     \
+               . = ALIGN(4);                                           \
+               __proc_info_begin = .;                                  \
+@@ -100,19 +105,19 @@
+  * only thing that matters is their relative offsets
+  */
+ #define ARM_VECTORS                                                   \
+-      __vectors_start = .;                                            \
++      __vectors_lma = .;                                              \
+       .vectors 0xffff0000 : AT(__vectors_start) {                     \
+               *(.vectors)                                             \
+       }                                                               \
+-      . = __vectors_start + SIZEOF(.vectors);                         \
+-      __vectors_end = .;                                              \
++      ARM_LMA(__vectors, .vectors);                                   \
++      . = __vectors_lma + SIZEOF(.vectors);                           \
+                                                                       \
+-      __stubs_start = .;                                              \
+-      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {            \
++      __stubs_lma = .;                                                \
++      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {              \
+               *(.stubs)                                               \
+       }                                                               \
+-      . = __stubs_start + SIZEOF(.stubs);                             \
+-      __stubs_end = .;                                                \
++      ARM_LMA(__stubs, .stubs);                                       \
++      . = __stubs_lma + SIZEOF(.stubs);                               \
+                                                                       \
+       PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
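
ARM_LMA() pairs each section with _start/_end symbols at its load address
rather than its runtime address. A sketch of what the macro expands to, and
how C code consumes the result (the symbol names match those used in
traps.c; the size helper is hypothetical):

    /*
     * ARM_LMA(__vectors, .vectors) expands, in the linker script, to:
     *
     *   __vectors_start = LOADADDR(.vectors);
     *   __vectors_end   = LOADADDR(.vectors) + SIZEOF(.vectors);
     *
     * so C code can copy the section out of the load image:
     */
    extern char __vectors_start[], __vectors_end[];

    static inline unsigned long vectors_load_size(void)
    {
            return (unsigned long)(__vectors_end - __vectors_start);
    }
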
diff --git a/queue-5.4/series b/queue-5.4/series
index ac0a7ea9e9bb59cbe69c8aa5b062dccb25d9580b..90b2748756b137118833590a16111ddd017b1640 100644 (file)
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -8,3 +8,10 @@ x86-speculation-use-generic-retpoline-by-default-on-amd.patch
 x86-speculation-update-link-to-amd-speculation-whitepaper.patch
 x86-speculation-warn-about-spectre-v2-lfence-mitigation.patch
 x86-speculation-warn-about-eibrs-lfence-unprivileged-ebpf-smt.patch
+arm-arm64-provide-a-wrapper-for-smccc-1.1-calls.patch
+arm-arm64-smccc-psci-add-arm_smccc_1_1_get_conduit.patch
+arm-report-spectre-v2-status-through-sysfs.patch
+arm-early-traps-initialisation.patch
+arm-use-loadaddr-to-get-load-address-of-sections.patch
+arm-spectre-bhb-workaround.patch
+arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch