--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:09 +0100
+Subject: arm/arm64: smccc: Add SMCCC-specific return codes
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-2-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit eff0e9e1078ea7dc1d794dc50e31baef984c46d7 upstream.
+
+We've so far used the PSCI return codes for SMCCC because they
+were extremely similar. But with the new ARM DEN 0070A specification,
+"NOT_REQUIRED" (-2) is clashing with PSCI's "PSCI_RET_INVALID_PARAMS".
+
+Let's bite the bullet and add SMCCC specific return codes. Users
+can be repainted as and when required.
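+
+For illustration, the clash (PSCI value from include/uapi/linux/psci.h,
+SMCCC value from the hunk below):
+
+	#define PSCI_RET_INVALID_PARAMS	-2	/* PSCI: bad arguments */
+	#define SMCCC_RET_NOT_REQUIRED	-2	/* SMCCC: mitigation not needed */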
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -291,5 +291,10 @@ asmlinkage void __arm_smccc_hvc(unsigned
+ */
+ #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
++/* Return codes defined in ARM DEN 0070A */
++#define SMCCC_RET_SUCCESS 0
++#define SMCCC_RET_NOT_SUPPORTED -1
++#define SMCCC_RET_NOT_REQUIRED -2
++
+ #endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:12 +0100
+Subject: arm64: Add ARCH_WORKAROUND_2 probing
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-5-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit a725e3dda1813ed306734823ac4c65ca04e38500 upstream.
+
+As for Spectre variant-2, we rely on SMCCC 1.1 to provide the
+discovery mechanism for detecting the SSBD mitigation.
+
+A new capability is also allocated for that purpose, and a
+config option.
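+
+As a plain C sketch of the decision implemented by has_ssbd_mitigation()
+below (not kernel code, function name illustrative):
+
+	/* Interpret the ARCH_FEATURES(ARCH_WORKAROUND_2) result */
+	static int ssbd_needs_dynamic_mitigation(int fw_ret)
+	{
+		if (fw_ret < 0)		/* unsupported, or mitigated in firmware */
+			return 0;
+		if (fw_ret > 0)		/* this CPU is unaffected */
+			return 0;
+		return 1;		/* zero: mitigation required */
+	}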
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig | 9 +++++
+ arch/arm64/include/asm/cpucaps.h | 3 +
+ arch/arm64/kernel/cpu_errata.c | 69 +++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 80 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -938,6 +938,15 @@ config HARDEN_EL2_VECTORS
+
+ If unsure, say Y.
+
++config ARM64_SSBD
++ bool "Speculative Store Bypass Disable" if EXPERT
++ default y
++ help
++ This enables mitigation of the bypassing of previous stores
++ by speculative loads.
++
++ If unsure, say Y.
++
+ menuconfig ARMV8_DEPRECATED
+ bool "Emulate deprecated/obsolete ARMv8 instructions"
+ depends on COMPAT
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -48,7 +48,8 @@
+ #define ARM64_HAS_CACHE_IDC 27
+ #define ARM64_HAS_CACHE_DIC 28
+ #define ARM64_HW_DBM 29
++#define ARM64_SSBD 30
+
+-#define ARM64_NCAPS 30
++#define ARM64_NCAPS 31
+
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -256,6 +256,67 @@ void __init arm64_update_smccc_conduit(s
+
+ *updptr = cpu_to_le32(insn);
+ }
++
++static void arm64_set_ssbd_mitigation(bool state)
++{
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
++ break;
++
++ case PSCI_CONDUIT_SMC:
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
++ break;
++
++ default:
++ WARN_ON_ONCE(1);
++ break;
++ }
++}
++
++static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ struct arm_smccc_res res;
++ bool supported = true;
++
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++ return false;
++
++ /*
++ * The probe function return value is either negative
++ * (unsupported or mitigated), positive (unaffected), or zero
++ * (requires mitigation). We only need to do anything in the
++ * last case.
++ */
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
++ if ((int)res.a0 != 0)
++ supported = false;
++ break;
++
++ case PSCI_CONDUIT_SMC:
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
++ if ((int)res.a0 != 0)
++ supported = false;
++ break;
++
++ default:
++ supported = false;
++ }
++
++ if (supported) {
++ __this_cpu_write(arm64_ssbd_callback_required, 1);
++ arm64_set_ssbd_mitigation(true);
++ }
++
++ return supported;
++}
+ #endif /* CONFIG_ARM64_SSBD */
+
+ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
+@@ -514,6 +575,14 @@ const struct arm64_cpu_capabilities arm6
+ ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
+ },
+ #endif
++#ifdef CONFIG_ARM64_SSBD
++ {
++ .desc = "Speculative Store Bypass Disable",
++ .capability = ARM64_SSBD,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .matches = has_ssbd_mitigation,
++ },
++#endif
+ {
+ }
+ };
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:11 +0100
+Subject: arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-4-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 5cf9ce6e5ea50f805c6188c04ed0daaec7b6887d upstream.
+
+In a heterogeneous system, we can end up with both affected and
+unaffected CPUs. Let's check their status before calling into the
+firmware.
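+
+In C terms, the per-cpu gate added to apply_ssbd below is roughly:
+
+	if (!__this_cpu_read(arm64_ssbd_callback_required))
+		goto skip;	/* CPU not affected, no firmware call */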
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c | 2 ++
+ arch/arm64/kernel/entry.S | 11 +++++++----
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -233,6 +233,8 @@ enable_smccc_arch_workaround_1(const str
+ #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
+ #ifdef CONFIG_ARM64_SSBD
++DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
++
+ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -140,8 +140,10 @@ alternative_else_nop_endif
+
+ // This macro corrupts x0-x3. It is the caller's duty
+ // to save/restore them if required.
+- .macro apply_ssbd, state
++ .macro apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
++ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
++ cbz \tmp2, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+ alternative_cb arm64_update_smccc_conduit
+@@ -176,12 +178,13 @@ alternative_cb_end
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
+ disable_step_tsk x19, x20 // exceptions when scheduling.
+
+- apply_ssbd 1
++ apply_ssbd 1, 1f, x22, x23
+
+ #ifdef CONFIG_ARM64_SSBD
+ ldp x0, x1, [sp, #16 * 0]
+ ldp x2, x3, [sp, #16 * 1]
+ #endif
++1:
+
+ mov x29, xzr // fp pointed to user-space
+ .else
+@@ -323,8 +326,8 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
+- apply_ssbd 0
+-
++ apply_ssbd 0, 5f, x0, x1
++5:
+ .endif
+
+ msr elr_el1, x21 // set up the return data
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:13 +0100
+Subject: arm64: Add 'ssbd' command-line option
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-6-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit a43ae4dfe56a01f5b98ba0cb2f784b6a43bafcc6 upstream.
+
+On a system where the firmware implements ARCH_WORKAROUND_2,
+it may be useful to either permanently enable or disable the
+workaround for cases where the user decides that they'd rather
+not get a trap overhead, and keep the mitigation permanently
+on or off instead of switching it on exception entry/exit.
+
+In any case, default to the mitigation being enabled.
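+
+For example, booting with:
+
+	ssbd=force-off
+
+keeps the mitigation off unconditionally, while the default behaviour
+is equivalent to:
+
+	ssbd=kernel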
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 17 +++
+ arch/arm64/include/asm/cpufeature.h | 6 +
+ arch/arm64/kernel/cpu_errata.c | 103 ++++++++++++++++++++----
+ 3 files changed, 110 insertions(+), 16 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4092,6 +4092,23 @@
+ expediting. Set to zero to disable automatic
+ expediting.
+
++ ssbd= [ARM64,HW]
++ Speculative Store Bypass Disable control
++
++ On CPUs that are vulnerable to the Speculative
++ Store Bypass vulnerability and offer a
++ firmware based mitigation, this parameter
++ indicates how the mitigation should be used:
++
++			force-on:  Unconditionally enable mitigation
++			for both kernel and userspace
++			force-off: Unconditionally disable mitigation
++			for both kernel and userspace
++ kernel: Always enable mitigation in the
++ kernel, and offer a prctl interface
++ to allow userspace to register its
++ interest in being mitigated too.
++
+ stack_guard_gap= [MM]
+ override the default stack gap protection. The value
+ is in page units and it defines how many pages prior
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -537,6 +537,12 @@ static inline u64 read_zcr_features(void
+ return zcr;
+ }
+
++#define ARM64_SSBD_UNKNOWN -1
++#define ARM64_SSBD_FORCE_DISABLE 0
++#define ARM64_SSBD_KERNEL 1
++#define ARM64_SSBD_FORCE_ENABLE 2
++#define ARM64_SSBD_MITIGATED 3
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -235,6 +235,38 @@ enable_smccc_arch_workaround_1(const str
+ #ifdef CONFIG_ARM64_SSBD
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
++int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
++
++static const struct ssbd_options {
++ const char *str;
++ int state;
++} ssbd_options[] = {
++ { "force-on", ARM64_SSBD_FORCE_ENABLE, },
++ { "force-off", ARM64_SSBD_FORCE_DISABLE, },
++ { "kernel", ARM64_SSBD_KERNEL, },
++};
++
++static int __init ssbd_cfg(char *buf)
++{
++ int i;
++
++ if (!buf || !buf[0])
++ return -EINVAL;
++
++ for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
++ int len = strlen(ssbd_options[i].str);
++
++ if (strncmp(buf, ssbd_options[i].str, len))
++ continue;
++
++ ssbd_state = ssbd_options[i].state;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++early_param("ssbd", ssbd_cfg);
++
+ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+@@ -278,44 +310,83 @@ static bool has_ssbd_mitigation(const st
+ int scope)
+ {
+ struct arm_smccc_res res;
+- bool supported = true;
++ bool required = true;
++ s32 val;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+- if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
++ ssbd_state = ARM64_SSBD_UNKNOWN;
+ return false;
++ }
+
+- /*
+- * The probe function return value is either negative
+- * (unsupported or mitigated), positive (unaffected), or zero
+- * (requires mitigation). We only need to do anything in the
+- * last case.
+- */
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+- if ((int)res.a0 != 0)
+- supported = false;
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+- if ((int)res.a0 != 0)
+- supported = false;
+ break;
+
+ default:
+- supported = false;
++ ssbd_state = ARM64_SSBD_UNKNOWN;
++ return false;
++ }
++
++ val = (s32)res.a0;
++
++ switch (val) {
++ case SMCCC_RET_NOT_SUPPORTED:
++ ssbd_state = ARM64_SSBD_UNKNOWN;
++ return false;
++
++ case SMCCC_RET_NOT_REQUIRED:
++ pr_info_once("%s mitigation not required\n", entry->desc);
++ ssbd_state = ARM64_SSBD_MITIGATED;
++ return false;
++
++ case SMCCC_RET_SUCCESS:
++ required = true;
++ break;
++
++ case 1: /* Mitigation not required on this CPU */
++ required = false;
++ break;
++
++ default:
++ WARN_ON(1);
++ return false;
+ }
+
+- if (supported) {
+- __this_cpu_write(arm64_ssbd_callback_required, 1);
++ switch (ssbd_state) {
++ case ARM64_SSBD_FORCE_DISABLE:
++ pr_info_once("%s disabled from command-line\n", entry->desc);
++ arm64_set_ssbd_mitigation(false);
++ required = false;
++ break;
++
++ case ARM64_SSBD_KERNEL:
++ if (required) {
++ __this_cpu_write(arm64_ssbd_callback_required, 1);
++ arm64_set_ssbd_mitigation(true);
++ }
++ break;
++
++ case ARM64_SSBD_FORCE_ENABLE:
++ pr_info_once("%s forced from command-line\n", entry->desc);
+ arm64_set_ssbd_mitigation(true);
++ required = true;
++ break;
++
++ default:
++ WARN_ON(1);
++ break;
+ }
+
+- return supported;
++ return required;
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:10 +0100
+Subject: arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-3-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 8e2906245f1e3b0d027169d9f2e55ce0548cb96e upstream.
+
+In order for the kernel to protect itself, let's call the SSBD mitigation
+implemented by the higher exception level (either hypervisor or firmware)
+on each transition between userspace and kernel.
+
+We must take the PSCI conduit into account in order to target the
+right exception level, hence the introduction of a runtime patching
+callback.
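+
+Once patched, apply_ssbd boils down to the following sequence (HVC
+conduit shown; SMC-based systems get "smc #0" instead):
+
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+	mov	w1, #\state		// 1 on kernel entry, 0 on exit
+	hvc	#0			// was a nop before patching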
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c | 24 ++++++++++++++++++++++++
+ arch/arm64/kernel/entry.S | 22 ++++++++++++++++++++++
+ include/linux/arm-smccc.h | 5 +++++
+ 3 files changed, 51 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -232,6 +232,30 @@ enable_smccc_arch_workaround_1(const str
+ }
+ #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
++#ifdef CONFIG_ARM64_SSBD
++void __init arm64_update_smccc_conduit(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr,
++ int nr_inst)
++{
++ u32 insn;
++
++ BUG_ON(nr_inst != 1);
++
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ insn = aarch64_insn_get_hvc_value();
++ break;
++ case PSCI_CONDUIT_SMC:
++ insn = aarch64_insn_get_smc_value();
++ break;
++ default:
++ return;
++ }
++
++ *updptr = cpu_to_le32(insn);
++}
++#endif /* CONFIG_ARM64_SSBD */
++
+ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
+ .matches = is_affected_midr_range, \
+ .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -18,6 +18,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+
+@@ -137,6 +138,18 @@ alternative_else_nop_endif
+ add \dst, \dst, #(\sym - .entry.tramp.text)
+ .endm
+
++ // This macro corrupts x0-x3. It is the caller's duty
++ // to save/restore them if required.
++ .macro apply_ssbd, state
++#ifdef CONFIG_ARM64_SSBD
++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
++ mov w1, #\state
++alternative_cb arm64_update_smccc_conduit
++ nop // Patched to SMC/HVC #0
++alternative_cb_end
++#endif
++ .endm
++
+ .macro kernel_entry, el, regsize = 64
+ .if \regsize == 32
+ mov w0, w0 // zero upper 32 bits of x0
+@@ -163,6 +176,13 @@ alternative_else_nop_endif
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
+ disable_step_tsk x19, x20 // exceptions when scheduling.
+
++ apply_ssbd 1
++
++#ifdef CONFIG_ARM64_SSBD
++ ldp x0, x1, [sp, #16 * 0]
++ ldp x2, x3, [sp, #16 * 1]
++#endif
++
+ mov x29, xzr // fp pointed to user-space
+ .else
+ add x21, sp, #S_FRAME_SIZE
+@@ -303,6 +323,8 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
++ apply_ssbd 0
++
+ .endif
+
+ msr elr_el1, x21 // set up the return data
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -80,6 +80,11 @@
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
++#define ARM_SMCCC_ARCH_WORKAROUND_2 \
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
++ ARM_SMCCC_SMC_32, \
++ 0, 0x7fff)
++
+ #ifndef __ASSEMBLY__
+
+ #include <linux/linkage.h>
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:22 +0100
+Subject: arm64: KVM: Add ARCH_WORKAROUND_2 discovery through ARCH_FEATURES_FUNC_ID
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-15-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 5d81f7dc9bca4f4963092433e27b508cbe524a32 upstream.
+
+Now that all our infrastructure is in place, let's expose the
+availability of ARCH_WORKAROUND_2 to guests. We take this opportunity
+to tidy up a couple of SMCCC constants.
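+
+From the guest's point of view, discovery is the usual SMCCC 1.1
+handshake, mirroring what the psci.c hunk below implements:
+
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+			  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+	/* res.a0 is SMCCC_RET_SUCCESS (dynamic mitigation available),
+	 * SMCCC_RET_NOT_REQUIRED, or SMCCC_RET_NOT_SUPPORTED */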
+
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_host.h | 12 ++++++++++++
+ arch/arm64/include/asm/kvm_host.h | 23 +++++++++++++++++++++++
+ arch/arm64/kvm/reset.c | 4 ++++
+ virt/kvm/arm/psci.c | 18 ++++++++++++++++--
+ 4 files changed, 55 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -315,6 +315,18 @@ static inline bool kvm_arm_harden_branch
+ return false;
+ }
+
++#define KVM_SSBD_UNKNOWN -1
++#define KVM_SSBD_FORCE_DISABLE 0
++#define KVM_SSBD_KERNEL 1
++#define KVM_SSBD_FORCE_ENABLE 2
++#define KVM_SSBD_MITIGATED 3
++
++static inline int kvm_arm_have_ssbd(void)
++{
++ /* No way to detect it yet, pretend it is not there. */
++ return KVM_SSBD_UNKNOWN;
++}
++
+ static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
+ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -455,6 +455,29 @@ static inline bool kvm_arm_harden_branch
+ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+ }
+
++#define KVM_SSBD_UNKNOWN -1
++#define KVM_SSBD_FORCE_DISABLE 0
++#define KVM_SSBD_KERNEL 1
++#define KVM_SSBD_FORCE_ENABLE 2
++#define KVM_SSBD_MITIGATED 3
++
++static inline int kvm_arm_have_ssbd(void)
++{
++ switch (arm64_get_ssbd_state()) {
++ case ARM64_SSBD_FORCE_DISABLE:
++ return KVM_SSBD_FORCE_DISABLE;
++ case ARM64_SSBD_KERNEL:
++ return KVM_SSBD_KERNEL;
++ case ARM64_SSBD_FORCE_ENABLE:
++ return KVM_SSBD_FORCE_ENABLE;
++ case ARM64_SSBD_MITIGATED:
++ return KVM_SSBD_MITIGATED;
++ case ARM64_SSBD_UNKNOWN:
++ default:
++ return KVM_SSBD_UNKNOWN;
++ }
++}
++
+ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
+ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
+
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu
+ /* Reset PMU */
+ kvm_pmu_vcpu_reset(vcpu);
+
++ /* Default workaround setup is enabled (if supported) */
++ if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
++ vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
++
+ /* Reset timer */
+ return kvm_timer_vcpu_reset(vcpu);
+ }
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -405,7 +405,7 @@ static int kvm_psci_call(struct kvm_vcpu
+ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+ {
+ u32 func_id = smccc_get_function(vcpu);
+- u32 val = PSCI_RET_NOT_SUPPORTED;
++ u32 val = SMCCC_RET_NOT_SUPPORTED;
+ u32 feature;
+
+ switch (func_id) {
+@@ -417,7 +417,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu
+ switch(feature) {
+ case ARM_SMCCC_ARCH_WORKAROUND_1:
+ if (kvm_arm_harden_branch_predictor())
+- val = 0;
++ val = SMCCC_RET_SUCCESS;
++ break;
++ case ARM_SMCCC_ARCH_WORKAROUND_2:
++ switch (kvm_arm_have_ssbd()) {
++ case KVM_SSBD_FORCE_DISABLE:
++ case KVM_SSBD_UNKNOWN:
++ break;
++ case KVM_SSBD_KERNEL:
++ val = SMCCC_RET_SUCCESS;
++ break;
++ case KVM_SSBD_FORCE_ENABLE:
++ case KVM_SSBD_MITIGATED:
++ val = SMCCC_RET_NOT_REQUIRED;
++ break;
++ }
+ break;
+ }
+ break;
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:20 +0100
+Subject: arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-13-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 55e3748e8902ff641e334226bdcb432f9a5d78d3 upstream.
+
+In order to offer ARCH_WORKAROUND_2 support to guests, we need
+a bit of infrastructure.
+
+Let's add a flag indicating whether or not the guest uses
+SSBD mitigation. Depending on the state of this flag, allow
+KVM to disable ARCH_WORKAROUND_2 before entering the guest,
+and enable it when exiting it.
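+
+In C terms, the world switch changes amount to the following sketch
+(helper names illustrative, not part of this patch):
+
+	if (host_mitigated() && !guest_wants_wa2(vcpu))
+		firmware_wa2(0);	/* off while the guest runs */
+	exit_code = __guest_enter(vcpu, host_ctxt);
+	if (host_mitigated() && !guest_wants_wa2(vcpu))
+		firmware_wa2(1);	/* back on for the host */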
+
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_mmu.h | 5 ++++
+ arch/arm64/include/asm/kvm_asm.h | 3 ++
+ arch/arm64/include/asm/kvm_host.h | 3 ++
+ arch/arm64/include/asm/kvm_mmu.h | 24 +++++++++++++++++++++
+ arch/arm64/kvm/hyp/switch.c | 42 ++++++++++++++++++++++++++++++++++++++
+ virt/kvm/arm/arm.c | 4 +++
+ 6 files changed, 81 insertions(+)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -335,6 +335,11 @@ static inline int kvm_map_vectors(void)
+ return 0;
+ }
+
++static inline int hyp_map_aux_data(void)
++{
++ return 0;
++}
++
+ #define kvm_phys_to_vttbr(addr) (addr)
+
+ #endif /* !__ASSEMBLY__ */
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -33,6 +33,9 @@
+ #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
+ #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+
++#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
++#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
++
+ /* Translate a kernel address of @sym into its equivalent linear mapping */
+ #define kvm_ksym_ref(sym) \
+ ({ \
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -216,6 +216,9 @@ struct kvm_vcpu_arch {
+ /* Exception Information */
+ struct kvm_vcpu_fault_info fault;
+
++ /* State of various workarounds, see kvm_asm.h for bit assignment */
++ u64 workaround_flags;
++
+ /* Guest debug state */
+ u64 debug_flags;
+
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -473,6 +473,30 @@ static inline int kvm_map_vectors(void)
+ }
+ #endif
+
++#ifdef CONFIG_ARM64_SSBD
++DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
++
++static inline int hyp_map_aux_data(void)
++{
++ int cpu, err;
++
++ for_each_possible_cpu(cpu) {
++ u64 *ptr;
++
++ ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
++ err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
++ if (err)
++ return err;
++ }
++ return 0;
++}
++#else
++static inline int hyp_map_aux_data(void)
++{
++ return 0;
++}
++#endif
++
+ #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
+
+ #endif /* __ASSEMBLY__ */
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -15,6 +15,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
+ #include <uapi/linux/psci.h>
+@@ -389,6 +390,39 @@ static bool __hyp_text fixup_guest_exit(
+ return false;
+ }
+
++static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
++{
++ if (!cpus_have_const_cap(ARM64_SSBD))
++ return false;
++
++ return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
++}
++
++static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_ARM64_SSBD
++ /*
++ * The host runs with the workaround always present. If the
++ * guest wants it disabled, so be it...
++ */
++ if (__needs_ssbd_off(vcpu) &&
++ __hyp_this_cpu_read(arm64_ssbd_callback_required))
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
++#endif
++}
++
++static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_ARM64_SSBD
++ /*
++ * If the guest has disabled the workaround, bring it back on.
++ */
++ if (__needs_ssbd_off(vcpu) &&
++ __hyp_this_cpu_read(arm64_ssbd_callback_required))
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
++#endif
++}
++
+ /* Switch to the guest for VHE systems running in EL2 */
+ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+ {
+@@ -409,6 +443,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vc
+ sysreg_restore_guest_state_vhe(guest_ctxt);
+ __debug_switch_to_guest(vcpu);
+
++ __set_guest_arch_workaround_state(vcpu);
++
+ do {
+ /* Jump in the fire! */
+ exit_code = __guest_enter(vcpu, host_ctxt);
+@@ -416,6 +452,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vc
+ /* And we're baaack! */
+ } while (fixup_guest_exit(vcpu, &exit_code));
+
++ __set_host_arch_workaround_state(vcpu);
++
+ fp_enabled = fpsimd_enabled_vhe();
+
+ sysreg_save_guest_state_vhe(guest_ctxt);
+@@ -465,6 +503,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struc
+ __sysreg_restore_state_nvhe(guest_ctxt);
+ __debug_switch_to_guest(vcpu);
+
++ __set_guest_arch_workaround_state(vcpu);
++
+ do {
+ /* Jump in the fire! */
+ exit_code = __guest_enter(vcpu, host_ctxt);
+@@ -472,6 +512,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struc
+ /* And we're baaack! */
+ } while (fixup_guest_exit(vcpu, &exit_code));
+
++ __set_host_arch_workaround_state(vcpu);
++
+ fp_enabled = __fpsimd_enabled_nvhe();
+
+ __sysreg_save_state_nvhe(guest_ctxt);
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -1490,6 +1490,10 @@ static int init_hyp_mode(void)
+ }
+ }
+
++ err = hyp_map_aux_data();
++ if (err)
++		kvm_err("Cannot map host auxiliary data: %d\n", err);
++
+ return 0;
+
+ out_err:
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:19 +0100
+Subject: arm64: KVM: Add HYP per-cpu accessors
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-12-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 85478bab409171de501b719971fd25a3d5d639f9 upstream.
+
+As we're going to require to access per-cpu variables at EL2,
+let's craft the minimum set of accessors required to implement
+reading a per-cpu variable, relying on tpidr_el2 to contain the
+per-cpu offset.
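+
+A later patch in this series uses this at EL2 as:
+
+	u64 required = __hyp_this_cpu_read(arm64_ssbd_callback_required);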
+
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_asm.h | 27 +++++++++++++++++++++++++--
+ 1 file changed, 25 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -71,14 +71,37 @@ extern u32 __kvm_get_mdcr_el2(void);
+
+ extern u32 __init_stage2_translation(void);
+
++/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
++#define __hyp_this_cpu_ptr(sym) \
++ ({ \
++ void *__ptr = hyp_symbol_addr(sym); \
++ __ptr += read_sysreg(tpidr_el2); \
++ (typeof(&sym))__ptr; \
++ })
++
++#define __hyp_this_cpu_read(sym) \
++ ({ \
++ *__hyp_this_cpu_ptr(sym); \
++ })
++
+ #else /* __ASSEMBLY__ */
+
+-.macro get_host_ctxt reg, tmp
+- adr_l \reg, kvm_host_cpu_state
++.macro hyp_adr_this_cpu reg, sym, tmp
++ adr_l \reg, \sym
+ mrs \tmp, tpidr_el2
+ add \reg, \reg, \tmp
+ .endm
+
++.macro hyp_ldr_this_cpu reg, sym, tmp
++ adr_l \reg, \sym
++ mrs \tmp, tpidr_el2
++ ldr \reg, [\reg, \tmp]
++.endm
++
++.macro get_host_ctxt reg, tmp
++ hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
++.endm
++
+ .macro get_vcpu_ptr vcpu, ctxt
+ get_host_ctxt \ctxt, \vcpu
+ ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:21 +0100
+Subject: arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-14-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit b4f18c063a13dfb33e3a63fe1844823e19c2265e upstream.
+
+In order to forward the guest's ARCH_WORKAROUND_2 calls to EL3,
+add a small(-ish) sequence to handle it at EL2. Special care must
+be taken to track the state of the guest itself by updating the
+workaround flags. We also rely on patching to enable calls into
+the firmware.
+
+Note that since we need to execute branches, this always executes
+after the Spectre-v2 mitigation has been applied.
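+
+The guest-visible convention handled below is:
+
+	x0 = ARM_SMCCC_ARCH_WORKAROUND_2
+	x1 = desired state (0: disable, non-zero: enable)
+	hvc	#0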
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/asm-offsets.c | 1 +
+ arch/arm64/kvm/hyp/hyp-entry.S | 38 +++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 38 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -136,6 +136,7 @@ int main(void)
+ #ifdef CONFIG_KVM_ARM_HOST
+ DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
+ DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
++ DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
+ DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
+ DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -106,8 +106,44 @@ el1_hvc_guest:
+ */
+ ldr x1, [sp] // Guest's x0
+ eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
++ cbz w1, wa_epilogue
++
++ /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
++ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
++ ARM_SMCCC_ARCH_WORKAROUND_2)
+ cbnz w1, el1_trap
+- mov x0, x1
++
++#ifdef CONFIG_ARM64_SSBD
++alternative_cb arm64_enable_wa2_handling
++ b wa2_end
++alternative_cb_end
++ get_vcpu_ptr x2, x0
++ ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
++
++ // Sanitize the argument and update the guest flags
++ ldr x1, [sp, #8] // Guest's x1
++ clz w1, w1 // Murphy's device:
++ lsr w1, w1, #5 // w1 = !!w1 without using
++ eor w1, w1, #1 // the flags...
++ bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
++ str x0, [x2, #VCPU_WORKAROUND_FLAGS]
++
++ /* Check that we actually need to perform the call */
++ hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
++ cbz x0, wa2_end
++
++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
++ smc #0
++
++ /* Don't leak data from the SMC call */
++ mov x3, xzr
++wa2_end:
++ mov x2, xzr
++ mov x1, xzr
++#endif
++
++wa_epilogue:
++ mov x0, xzr
+ add sp, sp, #16
+ eret
+
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:14 +0100
+Subject: arm64: ssbd: Add global mitigation state accessor
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-7-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit c32e1736ca03904c03de0e4459a673be194f56fd upstream.
+
+We're about to need the mitigation state in various parts of the
+kernel in order to do the right thing for userspace and guests.
+
+Let's expose an accessor that will let other subsystems know
+about the state.
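+
+For example, the CPU resume path later in this series uses it as:
+
+	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+		arm64_set_ssbd_mitigation(false);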
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -543,6 +543,16 @@ static inline u64 read_zcr_features(void
+ #define ARM64_SSBD_FORCE_ENABLE 2
+ #define ARM64_SSBD_MITIGATED 3
+
++static inline int arm64_get_ssbd_state(void)
++{
++#ifdef CONFIG_ARM64_SSBD
++ extern int ssbd_state;
++ return ssbd_state;
++#else
++ return ARM64_SSBD_UNKNOWN;
++#endif
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:18 +0100
+Subject: arm64: ssbd: Add prctl interface for per-thread mitigation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-11-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 9cdc0108baa8ef87c76ed834619886a46bd70cbe upstream.
+
+If running on a system that performs dynamic SSBD mitigation, allow
+userspace to request the mitigation for itself. This is implemented
+as a prctl call, allowing the mitigation to be enabled or disabled at
+will for this particular thread.
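+
+A minimal userspace example (assumes uapi headers from a kernel with
+the speculation control prctls, i.e. 4.17 or later):
+
+	#include <stdio.h>
+	#include <sys/prctl.h>
+	#include <linux/prctl.h>
+
+	int main(void)
+	{
+		/* Request SSB mitigation for this thread */
+		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+			  PR_SPEC_DISABLE, 0, 0))
+			perror("prctl");
+		/* Returns a PR_SPEC_* bitmask on success */
+		printf("state: 0x%x\n",
+		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+			     0, 0, 0));
+		return 0;
+	}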
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/Makefile | 1
+ arch/arm64/kernel/ssbd.c | 110 +++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 111 insertions(+)
+ create mode 100644 arch/arm64/kernel/ssbd.c
+
+--- a/arch/arm64/kernel/Makefile
++++ b/arch/arm64/kernel/Makefile
+@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) +=
+ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
+ arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
++arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
+
+ obj-y += $(arm64-obj-y) vdso/ probes/
+ obj-m += $(arm64-obj-m)
+--- /dev/null
++++ b/arch/arm64/kernel/ssbd.c
+@@ -0,0 +1,110 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/thread_info.h>
++
++#include <asm/cpufeature.h>
++
++/*
++ * prctl interface for SSBD
++ * FIXME: Drop the below ifdefery once merged in 4.18.
++ */
++#ifdef PR_SPEC_STORE_BYPASS
++static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
++{
++ int state = arm64_get_ssbd_state();
++
++ /* Unsupported */
++ if (state == ARM64_SSBD_UNKNOWN)
++ return -EINVAL;
++
++ /* Treat the unaffected/mitigated state separately */
++ if (state == ARM64_SSBD_MITIGATED) {
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ return -EPERM;
++ case PR_SPEC_DISABLE:
++ case PR_SPEC_FORCE_DISABLE:
++ return 0;
++ }
++ }
++
++ /*
++ * Things are a bit backward here: the arm64 internal API
++ * *enables the mitigation* when the userspace API *disables
++ * speculation*. So much fun.
++ */
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ /* If speculation is force disabled, enable is not allowed */
++ if (state == ARM64_SSBD_FORCE_ENABLE ||
++ task_spec_ssb_force_disable(task))
++ return -EPERM;
++ task_clear_spec_ssb_disable(task);
++ clear_tsk_thread_flag(task, TIF_SSBD);
++ break;
++ case PR_SPEC_DISABLE:
++ if (state == ARM64_SSBD_FORCE_DISABLE)
++ return -EPERM;
++ task_set_spec_ssb_disable(task);
++ set_tsk_thread_flag(task, TIF_SSBD);
++ break;
++ case PR_SPEC_FORCE_DISABLE:
++ if (state == ARM64_SSBD_FORCE_DISABLE)
++ return -EPERM;
++ task_set_spec_ssb_disable(task);
++ task_set_spec_ssb_force_disable(task);
++ set_tsk_thread_flag(task, TIF_SSBD);
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ return 0;
++}
++
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssbd_prctl_set(task, ctrl);
++ default:
++ return -ENODEV;
++ }
++}
++
++static int ssbd_prctl_get(struct task_struct *task)
++{
++ switch (arm64_get_ssbd_state()) {
++ case ARM64_SSBD_UNKNOWN:
++ return -EINVAL;
++ case ARM64_SSBD_FORCE_ENABLE:
++ return PR_SPEC_DISABLE;
++ case ARM64_SSBD_KERNEL:
++ if (task_spec_ssb_force_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
++ if (task_spec_ssb_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
++ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
++ case ARM64_SSBD_FORCE_DISABLE:
++ return PR_SPEC_ENABLE;
++ default:
++ return PR_SPEC_NOT_AFFECTED;
++ }
++}
++
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssbd_prctl_get(task);
++ default:
++ return -ENODEV;
++ }
++}
++#endif /* PR_SPEC_STORE_BYPASS */
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:17 +0100
+Subject: arm64: ssbd: Introduce thread flag to control userspace mitigation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-10-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 9dd9614f5476687abbff8d4b12cd08ae70d7c2ad upstream.
+
+In order to allow userspace to be mitigated on demand, let's
+introduce a new thread flag that prevents the mitigation from
+being turned off when exiting to userspace, and doesn't turn
+it on on entry into the kernel (with the assumption that the
+mitigation is always enabled in the kernel itself).
+
+This will be used by a prctl interface introduced in a later
+patch.
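+
+In C terms, the check added to apply_ssbd is roughly:
+
+	if (test_tsk_thread_flag(tsk, TIF_SSBD))
+		goto skip;	/* mitigation stays on for this task */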
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/thread_info.h | 1 +
+ arch/arm64/kernel/entry.S | 2 ++
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -94,6 +94,7 @@ void arch_release_task_struct(struct tas
+ #define TIF_32BIT 22 /* 32bit process */
+ #define TIF_SVE 23 /* Scalable Vector Extension in use */
+ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
++#define TIF_SSBD 25 /* Wants SSB mitigation */
+
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -147,6 +147,8 @@ alternative_cb arm64_enable_wa2_handling
+ alternative_cb_end
+ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
+ cbz \tmp2, \targ
++ ldr \tmp2, [tsk, #TSK_TI_FLAGS]
++ tbnz \tmp2, #TIF_SSBD, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+ alternative_cb arm64_update_smccc_conduit
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:16 +0100
+Subject: arm64: ssbd: Restore mitigation status on CPU resume
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-9-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 647d0519b53f440a55df163de21c52a8205431cc upstream.
+
+On a system where firmware can dynamically change the state of the
+mitigation, the CPU will always come up with the mitigation enabled,
+including when coming back from suspend.
+
+If the user has requested "no mitigation" via a command line option,
+let's enforce it by calling into the firmware again to disable it.
+
+Similarly, for a resume from hibernate, the mitigation could have
+been disabled by the boot kernel. Let's ensure that it is set
+back on in that case.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h | 6 ++++++
+ arch/arm64/kernel/cpu_errata.c | 2 +-
+ arch/arm64/kernel/hibernate.c | 11 +++++++++++
+ arch/arm64/kernel/suspend.c | 8 ++++++++
+ 4 files changed, 26 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -553,6 +553,12 @@ static inline int arm64_get_ssbd_state(v
+ #endif
+ }
+
++#ifdef CONFIG_ARM64_SSBD
++void arm64_set_ssbd_mitigation(bool state);
++#else
++static inline void arm64_set_ssbd_mitigation(bool state) {}
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -303,7 +303,7 @@ void __init arm64_enable_wa2_handling(st
+ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+ }
+
+-static void arm64_set_ssbd_mitigation(bool state)
++void arm64_set_ssbd_mitigation(bool state)
+ {
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -313,6 +313,17 @@ int swsusp_arch_suspend(void)
+
+ sleep_cpu = -EINVAL;
+ __cpu_suspend_exit();
++
++ /*
++ * Just in case the boot kernel did turn the SSBD
++ * mitigation off behind our back, let's set the state
++ * to what we expect it to be.
++ */
++ switch (arm64_get_ssbd_state()) {
++ case ARM64_SSBD_FORCE_ENABLE:
++ case ARM64_SSBD_KERNEL:
++ arm64_set_ssbd_mitigation(true);
++ }
+ }
+
+ local_daif_restore(flags);
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -62,6 +62,14 @@ void notrace __cpu_suspend_exit(void)
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(cpu);
++
++ /*
++ * On resume, firmware implementing dynamic mitigation will
++ * have turned the mitigation on. If the user has forcefully
++ * disabled it, make sure their wishes are obeyed.
++ */
++ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
++ arm64_set_ssbd_mitigation(false);
+ }
+
+ /*
--- /dev/null
+From foo@baz Fri Jul 20 11:55:21 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:47:15 +0100
+Subject: arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720094722.702-8-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 986372c4367f46b34a3c0f6918d7fb95cbdf39d6 upstream.
+
+In order to avoid checking arm64_ssbd_callback_required on each
+kernel entry/exit even if no mitigation is required, let's
+add yet another alternative that by default jumps over the mitigation,
+and that gets nop'ed out if we're doing dynamic mitigation.
+
+Think of it as a poor man's static key...
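+
+Concretely, the macro now starts life as an unconditional branch:
+
+	b	\targ			// default: skip the mitigation code
+
+and arm64_enable_wa2_handling() patches it to a NOP only when
+ssbd_state == ARM64_SSBD_KERNEL, i.e. when dynamic toggling is in use.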
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c | 14 ++++++++++++++
+ arch/arm64/kernel/entry.S | 3 +++
+ 2 files changed, 17 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -289,6 +289,20 @@ void __init arm64_update_smccc_conduit(s
+ *updptr = cpu_to_le32(insn);
+ }
+
++void __init arm64_enable_wa2_handling(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr,
++ int nr_inst)
++{
++ BUG_ON(nr_inst != 1);
++ /*
++ * Only allow mitigation on EL1 entry/exit and guest
++ * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
++ * be flipped.
++ */
++ if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
++ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
++}
++
+ static void arm64_set_ssbd_mitigation(bool state)
+ {
+ switch (psci_ops.conduit) {
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -142,6 +142,9 @@ alternative_else_nop_endif
+ // to save/restore them if required.
+ .macro apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
++alternative_cb arm64_enable_wa2_handling
++ b \targ
++alternative_cb_end
+ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
+ cbz \tmp2, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
net-nfc-avoid-stalls-when-nfc_alloc_send_skb-returned-null.patch
ipvs-initialize-tbl-entries-after-allocation.patch
ipvs-initialize-tbl-entries-in-ip_vs_lblc_init_svc.patch
+arm-arm64-smccc-add-smccc-specific-return-codes.patch
+arm64-call-arch_workaround_2-on-transitions-between-el0-and-el1.patch
+arm64-add-per-cpu-infrastructure-to-call-arch_workaround_2.patch
+arm64-add-arch_workaround_2-probing.patch
+arm64-add-ssbd-command-line-option.patch
+arm64-ssbd-add-global-mitigation-state-accessor.patch
+arm64-ssbd-skip-apply_ssbd-if-not-using-dynamic-mitigation.patch
+arm64-ssbd-restore-mitigation-status-on-cpu-resume.patch
+arm64-ssbd-introduce-thread-flag-to-control-userspace-mitigation.patch
+arm64-ssbd-add-prctl-interface-for-per-thread-mitigation.patch
+arm64-kvm-add-hyp-per-cpu-accessors.patch
+arm64-kvm-add-arch_workaround_2-support-for-guests.patch
+arm64-kvm-handle-guest-s-arch_workaround_2-requests.patch
+arm64-kvm-add-arch_workaround_2-discovery-through-arch_features_func_id.patch