--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:21 +0100
+Subject: arm/arm64: smccc: Add SMCCC-specific return codes
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-11-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit eff0e9e1078ea7dc1d794dc50e31baef984c46d7 upstream.
+
+We've so far used the PSCI return codes for SMCCC because they
+were extremely similar. But with the new ARM DEN 0070A specification,
+"NOT_REQUIRED" (-2) is clashing with PSCI's "PSCI_RET_INVALID_PARAMS".
+
+Let's bite the bullet and add SMCCC-specific return codes. Users
+can be repainted as and when required.
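+
+As an illustration only (not part of this patch), a caller probing a
+workaround would now compare the signed result against the SMCCC
+constants rather than the PSCI ones:
+
+        switch ((int)res.a0) {
+        case SMCCC_RET_NOT_REQUIRED:    /* -2, no longer PSCI_RET_INVALID_PARAMS */
+                /* mitigation permanently not needed */
+                break;
+        case SMCCC_RET_NOT_SUPPORTED:   /* -1 */
+                break;
+        }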
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -291,5 +291,10 @@ asmlinkage void __arm_smccc_hvc(unsigned
+ */
+ #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
++/* Return codes defined in ARM DEN 0070A */
++#define SMCCC_RET_SUCCESS 0
++#define SMCCC_RET_NOT_SUPPORTED -1
++#define SMCCC_RET_NOT_REQUIRED -2
++
+ #endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:24 +0100
+Subject: arm64: Add ARCH_WORKAROUND_2 probing
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-14-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit a725e3dda1813ed306734823ac4c65ca04e38500 upstream.
+
+As for Spectre variant-2, we rely on SMCCC 1.1 to provide the
+discovery mechanism for detecting the SSBD mitigation.
+
+A new capability is also allocated for that purpose, along with a
+config option.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig | 9 +++++
+ arch/arm64/include/asm/cpucaps.h | 3 +
+ arch/arm64/kernel/cpu_errata.c | 69 +++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 80 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -776,6 +776,15 @@ config HARDEN_BRANCH_PREDICTOR
+
+ If unsure, say Y.
+
++config ARM64_SSBD
++ bool "Speculative Store Bypass Disable" if EXPERT
++ default y
++ help
++ This enables mitigation of the bypassing of previous stores
++ by speculative loads.
++
++ If unsure, say Y.
++
+ menuconfig ARMV8_DEPRECATED
+ bool "Emulate deprecated/obsolete ARMv8 instructions"
+ depends on COMPAT
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -36,7 +36,8 @@
+ #define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+ #define ARM64_UNMAP_KERNEL_AT_EL0 16
+ #define ARM64_HARDEN_BRANCH_PREDICTOR 17
++#define ARM64_SSBD 18
+
+-#define ARM64_NCAPS 18
++#define ARM64_NCAPS 19
+
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -211,6 +211,67 @@ void __init arm64_update_smccc_conduit(s
+
+ *updptr = cpu_to_le32(insn);
+ }
++
++static void arm64_set_ssbd_mitigation(bool state)
++{
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
++ break;
++
++ case PSCI_CONDUIT_SMC:
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
++ break;
++
++ default:
++ WARN_ON_ONCE(1);
++ break;
++ }
++}
++
++static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ struct arm_smccc_res res;
++ bool supported = true;
++
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++ return false;
++
++ /*
++ * The probe function return value is either negative
++ * (unsupported or mitigated), positive (unaffected), or zero
++ * (requires mitigation). We only need to do anything in the
++ * last case.
++ */
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
++ if ((int)res.a0 != 0)
++ supported = false;
++ break;
++
++ case PSCI_CONDUIT_SMC:
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
++ if ((int)res.a0 != 0)
++ supported = false;
++ break;
++
++ default:
++ supported = false;
++ }
++
++ if (supported) {
++ __this_cpu_write(arm64_ssbd_callback_required, 1);
++ arm64_set_ssbd_mitigation(true);
++ }
++
++ return supported;
++}
+ #endif /* CONFIG_ARM64_SSBD */
+
+ #define MIDR_RANGE(model, min, max) \
+@@ -336,6 +397,14 @@ const struct arm64_cpu_capabilities arm6
+ .enable = enable_smccc_arch_workaround_1,
+ },
+ #endif
++#ifdef CONFIG_ARM64_SSBD
++ {
++ .desc = "Speculative Store Bypass Disable",
++ .def_scope = SCOPE_LOCAL_CPU,
++ .capability = ARM64_SSBD,
++ .matches = has_ssbd_mitigation,
++ },
++#endif
+ {
+ }
+ };
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:23 +0100
+Subject: arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-13-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 5cf9ce6e5ea50f805c6188c04ed0daaec7b6887d upstream.
+
+In a heterogeneous system, we can end up with both affected and
+unaffected CPUs. Let's check their status before calling into the
+firmware.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c | 2 ++
+ arch/arm64/kernel/entry.S | 11 +++++++----
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -188,6 +188,8 @@ static int enable_smccc_arch_workaround_
+ #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
+ #ifdef CONFIG_ARM64_SSBD
++DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
++
+ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -98,8 +98,10 @@ alternative_else_nop_endif
+
+ // This macro corrupts x0-x3. It is the caller's duty
+ // to save/restore them if required.
+- .macro apply_ssbd, state
++ .macro apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
++ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
++ cbz \tmp2, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+ alternative_cb arm64_update_smccc_conduit
+@@ -135,12 +137,13 @@ alternative_cb_end
+ ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
+ disable_step_tsk x19, x20 // exceptions when scheduling.
+
+- apply_ssbd 1
++ apply_ssbd 1, 1f, x22, x23
+
+ #ifdef CONFIG_ARM64_SSBD
+ ldp x0, x1, [sp, #16 * 0]
+ ldp x2, x3, [sp, #16 * 1]
+ #endif
++1:
+
+ mov x29, xzr // fp pointed to user-space
+ .else
+@@ -210,8 +213,8 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
+- apply_ssbd 0
+-
++ apply_ssbd 0, 5f, x0, x1
++5:
+ .endif
+ msr elr_el1, x21 // set up the return data
+ msr spsr_el1, x22
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:25 +0100
+Subject: arm64: Add 'ssbd' command-line option
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-15-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit a43ae4dfe56a01f5b98ba0cb2f784b6a43bafcc6 upstream.
+
+On a system where the firmware implements ARCH_WORKAROUND_2,
+it may be useful to either permanently enable or disable the
+workaround for cases where the user decides that they'd rather
+not get a trap overhead, and keep the mitigation permanently
+on or off instead of switching it on exception entry/exit.
+
+In any case, default to the mitigation being enabled.
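+
+As a usage illustration (not part of this patch), booting with
+
+        ssbd=force-off
+
+on the kernel command line keeps the mitigation permanently off, while
+omitting the parameter leaves the default "kernel" behaviour.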
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/kernel-parameters.txt | 17 +++++
+ arch/arm64/include/asm/cpufeature.h | 6 ++
+ arch/arm64/kernel/cpu_errata.c | 103 ++++++++++++++++++++++++++++++------
+ 3 files changed, 110 insertions(+), 16 deletions(-)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -4023,6 +4023,23 @@ bytes respectively. Such letter suffixes
+ spia_pedr=
+ spia_peddr=
+
++ ssbd= [ARM64,HW]
++ Speculative Store Bypass Disable control
++
++ On CPUs that are vulnerable to the Speculative
++ Store Bypass vulnerability and offer a
++ firmware based mitigation, this parameter
++ indicates how the mitigation should be used:
++
++ force-on: Unconditionally enable mitigation for
++ both kernel and userspace
++ force-off: Unconditionally disable mitigation for
++ both kernel and userspace
++ kernel: Always enable mitigation in the
++ kernel, and offer a prctl interface
++ to allow userspace to register its
++ interest in being mitigated too.
++
+ stack_guard_gap= [MM]
+ override the default stack gap protection. The value
+ is in page units and it defines how many pages prior
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -221,6 +221,12 @@ static inline bool system_supports_mixed
+ return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
+ }
+
++#define ARM64_SSBD_UNKNOWN -1
++#define ARM64_SSBD_FORCE_DISABLE 0
++#define ARM64_SSBD_KERNEL 1
++#define ARM64_SSBD_FORCE_ENABLE 2
++#define ARM64_SSBD_MITIGATED 3
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -190,6 +190,38 @@ static int enable_smccc_arch_workaround_
+ #ifdef CONFIG_ARM64_SSBD
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
++int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
++
++static const struct ssbd_options {
++ const char *str;
++ int state;
++} ssbd_options[] = {
++ { "force-on", ARM64_SSBD_FORCE_ENABLE, },
++ { "force-off", ARM64_SSBD_FORCE_DISABLE, },
++ { "kernel", ARM64_SSBD_KERNEL, },
++};
++
++static int __init ssbd_cfg(char *buf)
++{
++ int i;
++
++ if (!buf || !buf[0])
++ return -EINVAL;
++
++ for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
++ int len = strlen(ssbd_options[i].str);
++
++ if (strncmp(buf, ssbd_options[i].str, len))
++ continue;
++
++ ssbd_state = ssbd_options[i].state;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++early_param("ssbd", ssbd_cfg);
++
+ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+@@ -233,44 +265,83 @@ static bool has_ssbd_mitigation(const st
+ int scope)
+ {
+ struct arm_smccc_res res;
+- bool supported = true;
++ bool required = true;
++ s32 val;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+- if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
++ ssbd_state = ARM64_SSBD_UNKNOWN;
+ return false;
++ }
+
+- /*
+- * The probe function return value is either negative
+- * (unsupported or mitigated), positive (unaffected), or zero
+- * (requires mitigation). We only need to do anything in the
+- * last case.
+- */
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+- if ((int)res.a0 != 0)
+- supported = false;
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+- if ((int)res.a0 != 0)
+- supported = false;
+ break;
+
+ default:
+- supported = false;
++ ssbd_state = ARM64_SSBD_UNKNOWN;
++ return false;
++ }
++
++ val = (s32)res.a0;
++
++ switch (val) {
++ case SMCCC_RET_NOT_SUPPORTED:
++ ssbd_state = ARM64_SSBD_UNKNOWN;
++ return false;
++
++ case SMCCC_RET_NOT_REQUIRED:
++ pr_info_once("%s mitigation not required\n", entry->desc);
++ ssbd_state = ARM64_SSBD_MITIGATED;
++ return false;
++
++ case SMCCC_RET_SUCCESS:
++ required = true;
++ break;
++
++ case 1: /* Mitigation not required on this CPU */
++ required = false;
++ break;
++
++ default:
++ WARN_ON(1);
++ return false;
+ }
+
+- if (supported) {
+- __this_cpu_write(arm64_ssbd_callback_required, 1);
++ switch (ssbd_state) {
++ case ARM64_SSBD_FORCE_DISABLE:
++ pr_info_once("%s disabled from command-line\n", entry->desc);
++ arm64_set_ssbd_mitigation(false);
++ required = false;
++ break;
++
++ case ARM64_SSBD_KERNEL:
++ if (required) {
++ __this_cpu_write(arm64_ssbd_callback_required, 1);
++ arm64_set_ssbd_mitigation(true);
++ }
++ break;
++
++ case ARM64_SSBD_FORCE_ENABLE:
++ pr_info_once("%s forced from command-line\n", entry->desc);
+ arm64_set_ssbd_mitigation(true);
++ required = true;
++ break;
++
++ default:
++ WARN_ON(1);
++ break;
+ }
+
+- return supported;
++ return required;
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:18 +0100
+Subject: arm64: alternatives: Add dynamic patching feature
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-8-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+Commit dea5e2a4c5bcf196f879a66cebdcca07793e8ba4 upstream.
+
+We've so far relied on a patching infrastructure that only gave us
+a single alternative, without any way to provide a range of potential
+replacement instructions. For a single feature, this is an all or
+nothing thing.
+
+It would be interesting to have a more finely grained way of patching
+the kernel though, where we could dynamically tune the code that gets
+injected.
+
+In order to achieve this, let's introduce a new form of dynamic patching,
+associating a callback to a patching site. This callback gets source and
+target locations of the patching request, as well as the number of
+instructions to be patched.
+
+Dynamic patching is declared with the new ALTERNATIVE_CB and alternative_cb
+directives:
+
+ asm volatile(ALTERNATIVE_CB("mov %0, #0\n", callback)
+ : "r" (v));
+or
+ alternative_cb callback
+ mov x0, #0
+ alternative_cb_end
+
+where callback is the C function computing the alternative.
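+
+A minimal callback sketch (illustration only; the function name is
+hypothetical) follows the alternative_cb_t prototype and writes the
+replacement instruction(s) through updptr:
+
+        static void __init my_patch_cb(struct alt_instr *alt,
+                                       __le32 *origptr, __le32 *updptr,
+                                       int nr_inst)
+        {
+                /* e.g. leave a NOP at the patched location */
+                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+        }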
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/alternative.h | 41 ++++++++++++++++++++++++++++---
+ arch/arm64/kernel/alternative.c | 45 +++++++++++++++++++++++++----------
+ 2 files changed, 70 insertions(+), 16 deletions(-)
+
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -4,6 +4,8 @@
+ #include <asm/cpucaps.h>
+ #include <asm/insn.h>
+
++#define ARM64_CB_PATCH ARM64_NCAPS
++
+ #ifndef __ASSEMBLY__
+
+ #include <linux/init.h>
+@@ -21,12 +23,19 @@ struct alt_instr {
+ u8 alt_len; /* size of new instruction(s), <= orig_len */
+ };
+
++typedef void (*alternative_cb_t)(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr, int nr_inst);
++
+ void __init apply_alternatives_all(void);
+ void apply_alternatives(void *start, size_t length);
+
+-#define ALTINSTR_ENTRY(feature) \
++#define ALTINSTR_ENTRY(feature,cb) \
+ " .word 661b - .\n" /* label */ \
++ " .if " __stringify(cb) " == 0\n" \
+ " .word 663f - .\n" /* new instruction */ \
++ " .else\n" \
++ " .word " __stringify(cb) "- .\n" /* callback */ \
++ " .endif\n" \
+ " .hword " __stringify(feature) "\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* source len */ \
+ " .byte 664f-663f\n" /* replacement len */
+@@ -44,15 +53,18 @@ void apply_alternatives(void *start, siz
+ * but most assemblers die if insn1 or insn2 have a .inst. This should
+ * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
+ * containing commit 4e4d08cf7399b606 or c1baaddf8861).
++ *
++ * Alternatives with callbacks do not generate replacement instructions.
+ */
+-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
++#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
+ ".if "__stringify(cfg_enabled)" == 1\n" \
+ "661:\n\t" \
+ oldinstr "\n" \
+ "662:\n" \
+ ".pushsection .altinstructions,\"a\"\n" \
+- ALTINSTR_ENTRY(feature) \
++ ALTINSTR_ENTRY(feature,cb) \
+ ".popsection\n" \
++ " .if " __stringify(cb) " == 0\n" \
+ ".pushsection .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" \
+ newinstr "\n" \
+@@ -60,11 +72,17 @@ void apply_alternatives(void *start, siz
+ ".popsection\n\t" \
+ ".org . - (664b-663b) + (662b-661b)\n\t" \
+ ".org . - (662b-661b) + (664b-663b)\n" \
++ ".else\n\t" \
++ "663:\n\t" \
++ "664:\n\t" \
++ ".endif\n" \
+ ".endif\n"
+
+ #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
+- __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
++ __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
+
++#define ALTERNATIVE_CB(oldinstr, cb) \
++ __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
+ #else
+
+ #include <asm/assembler.h>
+@@ -131,6 +149,14 @@ void apply_alternatives(void *start, siz
+ 661:
+ .endm
+
++.macro alternative_cb cb
++ .set .Lasm_alt_mode, 0
++ .pushsection .altinstructions, "a"
++ altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
++ .popsection
++661:
++.endm
++
+ /*
+ * Provide the other half of the alternative code sequence.
+ */
+@@ -157,6 +183,13 @@ void apply_alternatives(void *start, siz
+ .endm
+
+ /*
++ * Callback-based alternative epilogue
++ */
++.macro alternative_cb_end
++662:
++.endm
++
++/*
+ * Provides a trivial alternative or default sequence consisting solely
+ * of NOPs. The number of NOPs is chosen automatically to match the
+ * previous case.
+--- a/arch/arm64/kernel/alternative.c
++++ b/arch/arm64/kernel/alternative.c
+@@ -28,7 +28,7 @@
+ #include <asm/sections.h>
+ #include <linux/stop_machine.h>
+
+-#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
++#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
+ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
+ #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+
+@@ -107,31 +107,52 @@ static u32 get_alt_insn(struct alt_instr
+ return insn;
+ }
+
++static void patch_alternative(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr, int nr_inst)
++{
++ __le32 *replptr;
++ int i;
++
++ replptr = ALT_REPL_PTR(alt);
++ for (i = 0; i < nr_inst; i++) {
++ u32 insn;
++
++ insn = get_alt_insn(alt, origptr + i, replptr + i);
++ updptr[i] = cpu_to_le32(insn);
++ }
++}
++
+ static void __apply_alternatives(void *alt_region)
+ {
+ struct alt_instr *alt;
+ struct alt_region *region = alt_region;
+- u32 *origptr, *replptr;
++ __le32 *origptr;
++ alternative_cb_t alt_cb;
+
+ for (alt = region->begin; alt < region->end; alt++) {
+- u32 insn;
+- int i, nr_inst;
++ int nr_inst;
+
+- if (!cpus_have_cap(alt->cpufeature))
++ /* Use ARM64_CB_PATCH as an unconditional patch */
++ if (alt->cpufeature < ARM64_CB_PATCH &&
++ !cpus_have_cap(alt->cpufeature))
+ continue;
+
+- BUG_ON(alt->alt_len != alt->orig_len);
++ if (alt->cpufeature == ARM64_CB_PATCH)
++ BUG_ON(alt->alt_len != 0);
++ else
++ BUG_ON(alt->alt_len != alt->orig_len);
+
+ pr_info_once("patching kernel code\n");
+
+ origptr = ALT_ORIG_PTR(alt);
+- replptr = ALT_REPL_PTR(alt);
+- nr_inst = alt->alt_len / sizeof(insn);
++ nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
+
+- for (i = 0; i < nr_inst; i++) {
+- insn = get_alt_insn(alt, origptr + i, replptr + i);
+- *(origptr + i) = cpu_to_le32(insn);
+- }
++ if (alt->cpufeature < ARM64_CB_PATCH)
++ alt_cb = patch_alternative;
++ else
++ alt_cb = ALT_REPL_PTR(alt);
++
++ alt_cb(alt, origptr, origptr, nr_inst);
+
+ flush_icache_range((uintptr_t)origptr,
+ (uintptr_t)(origptr + nr_inst));
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:16 +0100
+Subject: arm64: alternatives: use tpidr_el2 on VHE hosts
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-6-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit 6d99b68933fbcf51f84fcbba49246ce1209ec193 upstream.
+
+Now that KVM uses tpidr_el2 in the same way as Linux's cpu_offset in
+tpidr_el1, merge the two. This saves KVM from saving/restoring tpidr_el1
+on VHE hosts, and allows future code to blindly access per-cpu variables
+without triggering world-switch.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/alternative.h | 2 ++
+ arch/arm64/include/asm/assembler.h | 8 ++++++++
+ arch/arm64/include/asm/percpu.h | 12 ++++++++++--
+ arch/arm64/kernel/alternative.c | 9 +++++----
+ arch/arm64/kernel/cpufeature.c | 17 +++++++++++++++++
+ 5 files changed, 42 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -11,6 +11,8 @@
+ #include <linux/stddef.h>
+ #include <linux/stringify.h>
+
++extern int alternatives_applied;
++
+ struct alt_instr {
+ s32 orig_offset; /* offset to original instruction */
+ s32 alt_offset; /* offset to replacement instruction */
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -245,7 +245,11 @@ lr .req x30 // link register
+ */
+ .macro adr_this_cpu, dst, sym, tmp
+ adr_l \dst, \sym
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ mrs \tmp, tpidr_el1
++alternative_else
++ mrs \tmp, tpidr_el2
++alternative_endif
+ add \dst, \dst, \tmp
+ .endm
+
+@@ -256,7 +260,11 @@ lr .req x30 // link register
+ */
+ .macro ldr_this_cpu dst, sym, tmp
+ adr_l \dst, \sym
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ mrs \tmp, tpidr_el1
++alternative_else
++ mrs \tmp, tpidr_el2
++alternative_endif
+ ldr \dst, [\dst, \tmp]
+ .endm
+
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -16,9 +16,14 @@
+ #ifndef __ASM_PERCPU_H
+ #define __ASM_PERCPU_H
+
++#include <asm/alternative.h>
++
+ static inline void set_my_cpu_offset(unsigned long off)
+ {
+- asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
++ asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
++ "msr tpidr_el2, %0",
++ ARM64_HAS_VIRT_HOST_EXTN)
++ :: "r" (off) : "memory");
+ }
+
+ static inline unsigned long __my_cpu_offset(void)
+@@ -29,7 +34,10 @@ static inline unsigned long __my_cpu_off
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+- asm("mrs %0, tpidr_el1" : "=r" (off) :
++ asm(ALTERNATIVE("mrs %0, tpidr_el1",
++ "mrs %0, tpidr_el2",
++ ARM64_HAS_VIRT_HOST_EXTN)
++ : "=r" (off) :
+ "Q" (*(const unsigned long *)current_stack_pointer));
+
+ return off;
+--- a/arch/arm64/kernel/alternative.c
++++ b/arch/arm64/kernel/alternative.c
+@@ -32,6 +32,8 @@
+ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
+ #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+
++int alternatives_applied;
++
+ struct alt_region {
+ struct alt_instr *begin;
+ struct alt_instr *end;
+@@ -142,7 +144,6 @@ static void __apply_alternatives(void *a
+ */
+ static int __apply_alternatives_multi_stop(void *unused)
+ {
+- static int patched = 0;
+ struct alt_region region = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+@@ -150,14 +151,14 @@ static int __apply_alternatives_multi_st
+
+ /* We always have a CPU 0 at this point (__init) */
+ if (smp_processor_id()) {
+- while (!READ_ONCE(patched))
++ while (!READ_ONCE(alternatives_applied))
+ cpu_relax();
+ isb();
+ } else {
+- BUG_ON(patched);
++ BUG_ON(alternatives_applied);
+ __apply_alternatives(®ion);
+ /* Barriers provided by the cache flushing */
+- WRITE_ONCE(patched, 1);
++ WRITE_ONCE(alternatives_applied, 1);
+ }
+
+ return 0;
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -829,6 +829,22 @@ static int __init parse_kpti(char *str)
+ early_param("kpti", parse_kpti);
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
++static int cpu_copy_el2regs(void *__unused)
++{
++ /*
++ * Copy register values that aren't redirected by hardware.
++ *
++ * Before code patching, we only set tpidr_el1, all CPUs need to copy
++ * this value to tpidr_el2 before we patch the code. Once we've done
++ * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
++ * do anything here.
++ */
++ if (!alternatives_applied)
++ write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
++
++ return 0;
++}
++
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+ {
+ .desc = "GIC system register CPU interface",
+@@ -895,6 +911,7 @@ static const struct arm64_cpu_capabiliti
+ .capability = ARM64_HAS_VIRT_HOST_EXTN,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = runs_at_el2,
++ .enable = cpu_copy_el2regs,
+ },
+ {
+ .desc = "32-bit EL0 Support",
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:12 +0100
+Subject: arm64: assembler: introduce ldr_this_cpu
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-2-marc.zyngier@arm.com>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+Commit 1b7e2296a822dfd2349960addc42a139360ce769 upstream.
+
+Shortly we will want to load a percpu variable in the return from
+userspace path. We can save an instruction by folding the addition of
+the percpu offset into the load instruction, and this patch adds a new
+helper to do so.
+
+At the same time, we clean up this_cpu_ptr for consistency. As with
+{adr,ldr,str}_l, we change the template to take the destination register
+first, and name this dst. Secondly, we rename the macro to adr_this_cpu,
+following the scheme of adr_l, and matching the newly added
+ldr_this_cpu.
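+
+Hypothetical usage (illustration only; the symbol name is made up),
+loading a per-cpu word into x0 with x1 as scratch:
+
+        ldr_this_cpu x0, some_percpu_counter, x1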
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Laura Abbott <labbott@redhat.com>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/assembler.h | 19 +++++++++++++++----
+ arch/arm64/kernel/entry.S | 2 +-
+ 2 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -239,14 +239,25 @@ lr .req x30 // link register
+ .endm
+
+ /*
++ * @dst: Result of per_cpu(sym, smp_processor_id())
+ * @sym: The name of the per-cpu variable
+- * @reg: Result of per_cpu(sym, smp_processor_id())
+ * @tmp: scratch register
+ */
+- .macro this_cpu_ptr, sym, reg, tmp
+- adr_l \reg, \sym
++ .macro adr_this_cpu, dst, sym, tmp
++ adr_l \dst, \sym
+ mrs \tmp, tpidr_el1
+- add \reg, \reg, \tmp
++ add \dst, \dst, \tmp
++ .endm
++
++ /*
++ * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
++ * @sym: The name of the per-cpu variable
++ * @tmp: scratch register
++ */
++ .macro ldr_this_cpu dst, sym, tmp
++ adr_l \dst, \sym
++ mrs \tmp, tpidr_el1
++ ldr \dst, [\dst, \tmp]
+ .endm
+
+ /*
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -243,7 +243,7 @@ alternative_insn eret, nop, ARM64_UNMAP_
+ cmp x25, tsk
+ b.ne 9998f
+
+- this_cpu_ptr irq_stack, x25, x26
++ adr_this_cpu x25, irq_stack, x26
+ mov x26, #IRQ_STACK_START_SP
+ add x26, x25, x26
+
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:22 +0100
+Subject: arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-12-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 8e2906245f1e3b0d027169d9f2e55ce0548cb96e upstream.
+
+In order for the kernel to protect itself, let's call the SSBD mitigation
+implemented by the higher exception level (either hypervisor or firmware)
+on each transition between userspace and kernel.
+
+We must take the PSCI conduit into account in order to target the
+right exception level, hence the introduction of a runtime patching
+callback.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c | 24 ++++++++++++++++++++++++
+ arch/arm64/kernel/entry.S | 22 ++++++++++++++++++++++
+ include/linux/arm-smccc.h | 5 +++++
+ 3 files changed, 51 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -187,6 +187,30 @@ static int enable_smccc_arch_workaround_
+ }
+ #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
++#ifdef CONFIG_ARM64_SSBD
++void __init arm64_update_smccc_conduit(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr,
++ int nr_inst)
++{
++ u32 insn;
++
++ BUG_ON(nr_inst != 1);
++
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ insn = aarch64_insn_get_hvc_value();
++ break;
++ case PSCI_CONDUIT_SMC:
++ insn = aarch64_insn_get_smc_value();
++ break;
++ default:
++ return;
++ }
++
++ *updptr = cpu_to_le32(insn);
++}
++#endif /* CONFIG_ARM64_SSBD */
++
+ #define MIDR_RANGE(model, min, max) \
+ .def_scope = SCOPE_LOCAL_CPU, \
+ .matches = is_affected_midr_range, \
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -18,6 +18,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+
+@@ -95,6 +96,18 @@ alternative_else_nop_endif
+ add \dst, \dst, #(\sym - .entry.tramp.text)
+ .endm
+
++ // This macro corrupts x0-x3. It is the caller's duty
++ // to save/restore them if required.
++ .macro apply_ssbd, state
++#ifdef CONFIG_ARM64_SSBD
++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
++ mov w1, #\state
++alternative_cb arm64_update_smccc_conduit
++ nop // Patched to SMC/HVC #0
++alternative_cb_end
++#endif
++ .endm
++
+ .macro kernel_entry, el, regsize = 64
+ .if \regsize == 32
+ mov w0, w0 // zero upper 32 bits of x0
+@@ -122,6 +135,13 @@ alternative_else_nop_endif
+ ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
+ disable_step_tsk x19, x20 // exceptions when scheduling.
+
++ apply_ssbd 1
++
++#ifdef CONFIG_ARM64_SSBD
++ ldp x0, x1, [sp, #16 * 0]
++ ldp x2, x3, [sp, #16 * 1]
++#endif
++
+ mov x29, xzr // fp pointed to user-space
+ .else
+ add x21, sp, #S_FRAME_SIZE
+@@ -190,6 +210,8 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
++ apply_ssbd 0
++
+ .endif
+ msr elr_el1, x21 // set up the return data
+ msr spsr_el1, x22
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -80,6 +80,11 @@
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
++#define ARM_SMCCC_ARCH_WORKAROUND_2 \
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
++ ARM_SMCCC_SMC_32, \
++ 0, 0x7fff)
++
+ #ifndef __ASSEMBLY__
+
+ #include <linux/linkage.h>
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:34 +0100
+Subject: arm64: KVM: Add ARCH_WORKAROUND_2 discovery through ARCH_FEATURES_FUNC_ID
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-24-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 5d81f7dc9bca4f4963092433e27b508cbe524a32 upstream.
+
+Now that all our infrastructure is in place, let's expose the
+availability of ARCH_WORKAROUND_2 to guests. We take this opportunity
+to tidy up a couple of SMCCC constants.
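+
+From the guest's point of view (illustration only, mirroring the
+host-side probe earlier in this series), the discovery sequence is:
+
+        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                          ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+        /* res.a0 is SMCCC_RET_SUCCESS, SMCCC_RET_NOT_REQUIRED
+         * or SMCCC_RET_NOT_SUPPORTED */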
+
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_host.h | 12 ++++++++++++
+ arch/arm/kvm/psci.c | 18 ++++++++++++++++--
+ arch/arm64/include/asm/kvm_host.h | 23 +++++++++++++++++++++++
+ arch/arm64/kvm/reset.c | 4 ++++
+ 4 files changed, 55 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -327,4 +327,16 @@ static inline bool kvm_arm_harden_branch
+ return false;
+ }
+
++#define KVM_SSBD_UNKNOWN -1
++#define KVM_SSBD_FORCE_DISABLE 0
++#define KVM_SSBD_KERNEL 1
++#define KVM_SSBD_FORCE_ENABLE 2
++#define KVM_SSBD_MITIGATED 3
++
++static inline int kvm_arm_have_ssbd(void)
++{
++ /* No way to detect it yet, pretend it is not there. */
++ return KVM_SSBD_UNKNOWN;
++}
++
+ #endif /* __ARM_KVM_HOST_H__ */
+--- a/arch/arm/kvm/psci.c
++++ b/arch/arm/kvm/psci.c
+@@ -403,7 +403,7 @@ static int kvm_psci_call(struct kvm_vcpu
+ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+ {
+ u32 func_id = smccc_get_function(vcpu);
+- u32 val = PSCI_RET_NOT_SUPPORTED;
++ u32 val = SMCCC_RET_NOT_SUPPORTED;
+ u32 feature;
+
+ switch (func_id) {
+@@ -415,7 +415,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu
+ switch(feature) {
+ case ARM_SMCCC_ARCH_WORKAROUND_1:
+ if (kvm_arm_harden_branch_predictor())
+- val = 0;
++ val = SMCCC_RET_SUCCESS;
++ break;
++ case ARM_SMCCC_ARCH_WORKAROUND_2:
++ switch (kvm_arm_have_ssbd()) {
++ case KVM_SSBD_FORCE_DISABLE:
++ case KVM_SSBD_UNKNOWN:
++ break;
++ case KVM_SSBD_KERNEL:
++ val = SMCCC_RET_SUCCESS;
++ break;
++ case KVM_SSBD_FORCE_ENABLE:
++ case KVM_SSBD_MITIGATED:
++ val = SMCCC_RET_NOT_REQUIRED;
++ break;
++ }
+ break;
+ }
+ break;
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -425,4 +425,27 @@ static inline bool kvm_arm_harden_branch
+ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+ }
+
++#define KVM_SSBD_UNKNOWN -1
++#define KVM_SSBD_FORCE_DISABLE 0
++#define KVM_SSBD_KERNEL 1
++#define KVM_SSBD_FORCE_ENABLE 2
++#define KVM_SSBD_MITIGATED 3
++
++static inline int kvm_arm_have_ssbd(void)
++{
++ switch (arm64_get_ssbd_state()) {
++ case ARM64_SSBD_FORCE_DISABLE:
++ return KVM_SSBD_FORCE_DISABLE;
++ case ARM64_SSBD_KERNEL:
++ return KVM_SSBD_KERNEL;
++ case ARM64_SSBD_FORCE_ENABLE:
++ return KVM_SSBD_FORCE_ENABLE;
++ case ARM64_SSBD_MITIGATED:
++ return KVM_SSBD_MITIGATED;
++ case ARM64_SSBD_UNKNOWN:
++ default:
++ return KVM_SSBD_UNKNOWN;
++ }
++}
++
+ #endif /* __ARM64_KVM_HOST_H__ */
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -135,6 +135,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu
+ /* Reset PMU */
+ kvm_pmu_vcpu_reset(vcpu);
+
++ /* Default workaround setup is enabled (if supported) */
++ if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
++ vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
++
+ /* Reset timer */
+ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
+ }
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:32 +0100
+Subject: arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-22-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 55e3748e8902ff641e334226bdcb432f9a5d78d3 upstream.
+
+In order to offer ARCH_WORKAROUND_2 support to guests, we need
+a bit of infrastructure.
+
+Let's add a flag indicating whether or not the guest uses
+SSBD mitigation. Depending on the state of this flag, allow
+KVM to disable ARCH_WORKAROUND_2 before entering the guest,
+and enable it when exiting it.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_mmu.h | 5 +++++
+ arch/arm/kvm/arm.c | 6 ++++++
+ arch/arm64/include/asm/kvm_asm.h | 3 +++
+ arch/arm64/include/asm/kvm_host.h | 3 +++
+ arch/arm64/include/asm/kvm_mmu.h | 24 ++++++++++++++++++++++++
+ arch/arm64/kvm/hyp/switch.c | 38 ++++++++++++++++++++++++++++++++++++++
+ 6 files changed, 79 insertions(+)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -256,6 +256,11 @@ static inline int kvm_map_vectors(void)
+ return 0;
+ }
+
++static inline int hyp_map_aux_data(void)
++{
++ return 0;
++}
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* __ARM_KVM_MMU_H__ */
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -1367,6 +1367,12 @@ static int init_hyp_mode(void)
+ }
+ }
+
++ err = hyp_map_aux_data();
++ if (err) {
++ kvm_err("Cannot map host auxilary data: %d\n", err);
++ goto out_err;
++ }
++
+ kvm_info("Hyp mode initialized successfully\n");
+
+ return 0;
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -33,6 +33,9 @@
+ #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
+ #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+
++#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
++#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
++
+ /* Translate a kernel address of @sym into its equivalent linear mapping */
+ #define kvm_ksym_ref(sym) \
+ ({ \
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -213,6 +213,9 @@ struct kvm_vcpu_arch {
+ /* Exception Information */
+ struct kvm_vcpu_fault_info fault;
+
++ /* State of various workarounds, see kvm_asm.h for bit assignment */
++ u64 workaround_flags;
++
+ /* Guest debug state */
+ u64 debug_flags;
+
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -387,5 +387,29 @@ static inline int kvm_map_vectors(void)
+ }
+ #endif
+
++#ifdef CONFIG_ARM64_SSBD
++DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
++
++static inline int hyp_map_aux_data(void)
++{
++ int cpu, err;
++
++ for_each_possible_cpu(cpu) {
++ u64 *ptr;
++
++ ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
++ err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
++ if (err)
++ return err;
++ }
++ return 0;
++}
++#else
++static inline int hyp_map_aux_data(void)
++{
++ return 0;
++}
++#endif
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ARM64_KVM_MMU_H__ */
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -15,6 +15,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
+ #include <uapi/linux/psci.h>
+@@ -267,6 +268,39 @@ static void __hyp_text __skip_instr(stru
+ write_sysreg_el2(*vcpu_pc(vcpu), elr);
+ }
+
++static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
++{
++ if (!cpus_have_cap(ARM64_SSBD))
++ return false;
++
++ return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
++}
++
++static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_ARM64_SSBD
++ /*
++ * The host runs with the workaround always present. If the
++ * guest wants it disabled, so be it...
++ */
++ if (__needs_ssbd_off(vcpu) &&
++ __hyp_this_cpu_read(arm64_ssbd_callback_required))
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
++#endif
++}
++
++static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_ARM64_SSBD
++ /*
++ * If the guest has disabled the workaround, bring it back on.
++ */
++ if (__needs_ssbd_off(vcpu) &&
++ __hyp_this_cpu_read(arm64_ssbd_callback_required))
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
++#endif
++}
++
+ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_cpu_context *host_ctxt;
+@@ -297,6 +331,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm
+ __sysreg_restore_guest_state(guest_ctxt);
+ __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
+
++ __set_guest_arch_workaround_state(vcpu);
++
+ /* Jump in the fire! */
+ again:
+ exit_code = __guest_enter(vcpu, host_ctxt);
+@@ -339,6 +375,8 @@ again:
+ }
+ }
+
++ __set_host_arch_workaround_state(vcpu);
++
+ fp_enabled = __fpsimd_enabled();
+
+ __sysreg_save_guest_state(guest_ctxt);
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:31 +0100
+Subject: arm64: KVM: Add HYP per-cpu accessors
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-21-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 85478bab409171de501b719971fd25a3d5d639f9 upstream.
+
+As we're going to need to access per-cpu variables at EL2,
+let's craft the minimum set of accessors required to implement
+reading a per-cpu variable, relying on tpidr_el2 to contain the
+per-cpu offset.
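+
+For illustration (this mirrors how the guest support code elsewhere in
+this series uses it), a per-cpu flag can then be read at EL2 with:
+
+        if (__hyp_this_cpu_read(arm64_ssbd_callback_required))
+                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);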
+
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_asm.h | 27 +++++++++++++++++++++++++--
+ 1 file changed, 25 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -66,14 +66,37 @@ extern u32 __kvm_get_mdcr_el2(void);
+
+ extern u32 __init_stage2_translation(void);
+
++/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
++#define __hyp_this_cpu_ptr(sym) \
++ ({ \
++ void *__ptr = hyp_symbol_addr(sym); \
++ __ptr += read_sysreg(tpidr_el2); \
++ (typeof(&sym))__ptr; \
++ })
++
++#define __hyp_this_cpu_read(sym) \
++ ({ \
++ *__hyp_this_cpu_ptr(sym); \
++ })
++
+ #else /* __ASSEMBLY__ */
+
+-.macro get_host_ctxt reg, tmp
+- adr_l \reg, kvm_host_cpu_state
++.macro hyp_adr_this_cpu reg, sym, tmp
++ adr_l \reg, \sym
+ mrs \tmp, tpidr_el2
+ add \reg, \reg, \tmp
+ .endm
+
++.macro hyp_ldr_this_cpu reg, sym, tmp
++ adr_l \reg, \sym
++ mrs \tmp, tpidr_el2
++ ldr \reg, [\reg, \tmp]
++.endm
++
++.macro get_host_ctxt reg, tmp
++ hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
++.endm
++
+ .macro get_vcpu_ptr vcpu, ctxt
+ get_host_ctxt \ctxt, \vcpu
+ ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:33 +0100
+Subject: arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-23-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit b4f18c063a13dfb33e3a63fe1844823e19c2265e upstream.
+
+In order to forward the guest's ARCH_WORKAROUND_2 calls to EL3,
+add a small(-ish) sequence to handle it at EL2. Special care must
+be taken to track the state of the guest itself by updating the
+workaround flags. We also rely on patching to enable calls into
+the firmware.
+
+Note that since we need to execute branches, this always executes
+after the Spectre-v2 mitigation has been applied.
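+
+In C terms (illustration only of what the hand-written assembly below
+does; guest_x1 stands in for the guest's x1 argument), the flag update
+amounts to:
+
+        if (guest_x1)
+                vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
+        else
+                vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;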
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/asm-offsets.c | 1 +
+ arch/arm64/kvm/hyp/hyp-entry.S | 38 +++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 38 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -127,6 +127,7 @@ int main(void)
+ BLANK();
+ #ifdef CONFIG_KVM_ARM_HOST
+ DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
++ DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
+ DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
+ DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -107,8 +107,44 @@ el1_hvc_guest:
+ */
+ ldr x1, [sp] // Guest's x0
+ eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
++ cbz w1, wa_epilogue
++
++ /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
++ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
++ ARM_SMCCC_ARCH_WORKAROUND_2)
+ cbnz w1, el1_trap
+- mov x0, x1
++
++#ifdef CONFIG_ARM64_SSBD
++alternative_cb arm64_enable_wa2_handling
++ b wa2_end
++alternative_cb_end
++ get_vcpu_ptr x2, x0
++ ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
++
++ // Sanitize the argument and update the guest flags
++ ldr x1, [sp, #8] // Guest's x1
++ clz w1, w1 // Murphy's device:
++ lsr w1, w1, #5 // w1 = !!w1 without using
++ eor w1, w1, #1 // the flags...
++ bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
++ str x0, [x2, #VCPU_WORKAROUND_FLAGS]
++
++ /* Check that we actually need to perform the call */
++ hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
++ cbz x0, wa2_end
++
++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
++ smc #0
++
++ /* Don't leak data from the SMC call */
++ mov x3, xzr
++wa2_end:
++ mov x2, xzr
++ mov x1, xzr
++#endif
++
++wa_epilogue:
++ mov x0, xzr
+ add sp, sp, #16
+ eret
+
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:26 +0100
+Subject: arm64: ssbd: Add global mitigation state accessor
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-16-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit c32e1736ca03904c03de0e4459a673be194f56fd upstream.
+
+We're about to need the mitigation state in various parts of the
+kernel in order to do the right thing for userspace and guests.
+
+Let's expose an accessor that will let other subsystems know
+about the state.
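+
+A typical consumer (illustration; later patches in this series do
+exactly this on resume) simply acts on the returned value:
+
+        if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+                arm64_set_ssbd_mitigation(false);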
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -227,6 +227,16 @@ static inline bool system_supports_mixed
+ #define ARM64_SSBD_FORCE_ENABLE 2
+ #define ARM64_SSBD_MITIGATED 3
+
++static inline int arm64_get_ssbd_state(void)
++{
++#ifdef CONFIG_ARM64_SSBD
++ extern int ssbd_state;
++ return ssbd_state;
++#else
++ return ARM64_SSBD_UNKNOWN;
++#endif
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:30 +0100
+Subject: arm64: ssbd: Add prctl interface for per-thread mitigation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-20-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 9cdc0108baa8ef87c76ed834619886a46bd70cbe upstream.
+
+If running on a system that performs dynamic SSBD mitigation, allow
+userspace to request the mitigation for itself. This is implemented
+as a prctl call, allowing the mitigation to be enabled or disabled at
+will for this particular thread.
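+
+From userspace (illustration only, using the generic speculation
+control prctl this hooks into), a thread requests the mitigation for
+itself with:
+
+        prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+              PR_SPEC_DISABLE, 0, 0);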
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/Makefile | 1
+ arch/arm64/kernel/ssbd.c | 108 +++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 109 insertions(+)
+ create mode 100644 arch/arm64/kernel/ssbd.c
+
+--- a/arch/arm64/kernel/Makefile
++++ b/arch/arm64/kernel/Makefile
+@@ -50,6 +50,7 @@ arm64-obj-$(CONFIG_RANDOMIZE_BASE) += ka
+ arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
+ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
+ cpu-reset.o
++arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
+
+ ifeq ($(CONFIG_KVM),y)
+ arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
+--- /dev/null
++++ b/arch/arm64/kernel/ssbd.c
+@@ -0,0 +1,108 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
++ */
++
++#include <linux/errno.h>
++#include <linux/prctl.h>
++#include <linux/sched.h>
++#include <linux/thread_info.h>
++
++#include <asm/cpufeature.h>
++
++/*
++ * prctl interface for SSBD
++ */
++static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
++{
++ int state = arm64_get_ssbd_state();
++
++ /* Unsupported */
++ if (state == ARM64_SSBD_UNKNOWN)
++ return -EINVAL;
++
++ /* Treat the unaffected/mitigated state separately */
++ if (state == ARM64_SSBD_MITIGATED) {
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ return -EPERM;
++ case PR_SPEC_DISABLE:
++ case PR_SPEC_FORCE_DISABLE:
++ return 0;
++ }
++ }
++
++ /*
++ * Things are a bit backward here: the arm64 internal API
++ * *enables the mitigation* when the userspace API *disables
++ * speculation*. So much fun.
++ */
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ /* If speculation is force disabled, enable is not allowed */
++ if (state == ARM64_SSBD_FORCE_ENABLE ||
++ task_spec_ssb_force_disable(task))
++ return -EPERM;
++ task_clear_spec_ssb_disable(task);
++ clear_tsk_thread_flag(task, TIF_SSBD);
++ break;
++ case PR_SPEC_DISABLE:
++ if (state == ARM64_SSBD_FORCE_DISABLE)
++ return -EPERM;
++ task_set_spec_ssb_disable(task);
++ set_tsk_thread_flag(task, TIF_SSBD);
++ break;
++ case PR_SPEC_FORCE_DISABLE:
++ if (state == ARM64_SSBD_FORCE_DISABLE)
++ return -EPERM;
++ task_set_spec_ssb_disable(task);
++ task_set_spec_ssb_force_disable(task);
++ set_tsk_thread_flag(task, TIF_SSBD);
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ return 0;
++}
++
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssbd_prctl_set(task, ctrl);
++ default:
++ return -ENODEV;
++ }
++}
++
++static int ssbd_prctl_get(struct task_struct *task)
++{
++ switch (arm64_get_ssbd_state()) {
++ case ARM64_SSBD_UNKNOWN:
++ return -EINVAL;
++ case ARM64_SSBD_FORCE_ENABLE:
++ return PR_SPEC_DISABLE;
++ case ARM64_SSBD_KERNEL:
++ if (task_spec_ssb_force_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
++ if (task_spec_ssb_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
++ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
++ case ARM64_SSBD_FORCE_DISABLE:
++ return PR_SPEC_ENABLE;
++ default:
++ return PR_SPEC_NOT_AFFECTED;
++ }
++}
++
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssbd_prctl_get(task);
++ default:
++ return -ENODEV;
++ }
++}
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:29 +0100
+Subject: arm64: ssbd: Introduce thread flag to control userspace mitigation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-19-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 9dd9614f5476687abbff8d4b12cd08ae70d7c2ad upstream.
+
+In order to allow userspace to be mitigated on demand, let's
+introduce a new thread flag that prevents the mitigation from
+being turned off when exiting to userspace, and doesn't turn
+it on on entry into the kernel (with the assumption that the
+mitigation is always enabled in the kernel itself).
+
+This will be used by a prctl interface introduced in a later
+patch.
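+
+For reference (illustration; the prctl patch in this series does this),
+the flag is driven per task with the usual thread-flag helpers:
+
+        set_tsk_thread_flag(task, TIF_SSBD);    /* keep mitigation on in userspace */
+        clear_tsk_thread_flag(task, TIF_SSBD);  /* revert to default behaviour */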
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/thread_info.h | 1 +
+ arch/arm64/kernel/entry.S | 2 ++
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -122,6 +122,7 @@ static inline struct thread_info *curren
+ #define TIF_RESTORE_SIGMASK 20
+ #define TIF_SINGLESTEP 21
+ #define TIF_32BIT 22 /* 32bit process */
++#define TIF_SSBD 23 /* Wants SSB mitigation */
+
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -105,6 +105,8 @@ alternative_cb arm64_enable_wa2_handling
+ alternative_cb_end
+ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
+ cbz \tmp2, \targ
++ ldr \tmp2, [tsk, #TI_FLAGS]
++ tbnz \tmp2, #TIF_SSBD, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+ alternative_cb arm64_update_smccc_conduit
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:28 +0100
+Subject: arm64: ssbd: Restore mitigation status on CPU resume
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-18-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 647d0519b53f440a55df163de21c52a8205431cc upstream.
+
+On a system where firmware can dynamically change the state of the
+mitigation, the CPU will always come up with the mitigation enabled,
+including when coming back from suspend.
+
+If the user has requested "no mitigation" via a command line option,
+let's enforce it by calling into the firmware again to disable it.
+
+Similarly, for a resume from hibernate, the mitigation could have
+been disabled by the boot kernel. Let's ensure that it is set
+back on in that case.
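+
+For reference, the helper this patch makes global just asks firmware to
+flip the workaround through SMCCC; a simplified sketch of the HVC case
+(the real function also handles the SMC conduit):
+
+    void arm64_set_ssbd_mitigation(bool state)
+    {
+    	/* state == true turns the mitigation on, false turns it off */
+    	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
+    }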
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h | 6 ++++++
+ arch/arm64/kernel/cpu_errata.c | 2 +-
+ arch/arm64/kernel/hibernate.c | 11 +++++++++++
+ arch/arm64/kernel/suspend.c | 8 ++++++++
+ 4 files changed, 26 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -237,6 +237,12 @@ static inline int arm64_get_ssbd_state(v
+ #endif
+ }
+
++#ifdef CONFIG_ARM64_SSBD
++void arm64_set_ssbd_mitigation(bool state);
++#else
++static inline void arm64_set_ssbd_mitigation(bool state) {}
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -258,7 +258,7 @@ void __init arm64_enable_wa2_handling(st
+ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+ }
+
+-static void arm64_set_ssbd_mitigation(bool state)
++void arm64_set_ssbd_mitigation(bool state)
+ {
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -308,6 +308,17 @@ int swsusp_arch_suspend(void)
+
+ sleep_cpu = -EINVAL;
+ __cpu_suspend_exit();
++
++ /*
++ * Just in case the boot kernel did turn the SSBD
++ * mitigation off behind our back, let's set the state
++ * to what we expect it to be.
++ */
++ switch (arm64_get_ssbd_state()) {
++ case ARM64_SSBD_FORCE_ENABLE:
++ case ARM64_SSBD_KERNEL:
++ arm64_set_ssbd_mitigation(true);
++ }
+ }
+
+ local_dbg_restore(flags);
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -67,6 +67,14 @@ void notrace __cpu_suspend_exit(void)
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(cpu);
++
++ /*
++ * On resume, firmware implementing dynamic mitigation will
++ * have turned the mitigation on. If the user has forcefully
++ * disabled it, make sure their wishes are obeyed.
++ */
++ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
++ arm64_set_ssbd_mitigation(false);
+ }
+
+ /*
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:27 +0100
+Subject: arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-17-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 986372c4367f46b34a3c0f6918d7fb95cbdf39d6 upstream.
+
+In order to avoid checking arm64_ssbd_callback_required on each
+kernel entry/exit even if no mitigation is required, let's
+add yet another alternative that by default jumps over the mitigation,
+and that gets nop'ed out if we're doing dynamic mitigation.
+
+Think of it as a poor man's static key...
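+
+In C terms (illustrative only, names invented for this sketch), the entry
+path ends up behaving like:
+
+    /* The alternative's default keeps the early "return false" (the
+     * 'b \targ' instruction); arm64_enable_wa2_handling() NOPs it out
+     * only when the state is ARM64_SSBD_KERNEL, i.e. dynamic mitigation. */
+    static bool apply_ssbd_reaches_firmware(bool dynamic_mitigation,
+    					    bool callback_required)
+    {
+    	if (!dynamic_mitigation)
+    		return false;		/* jump over the whole sequence */
+    	return callback_required;	/* fall through to the checks   */
+    }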
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c | 14 ++++++++++++++
+ arch/arm64/kernel/entry.S | 3 +++
+ 2 files changed, 17 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -244,6 +244,20 @@ void __init arm64_update_smccc_conduit(s
+ *updptr = cpu_to_le32(insn);
+ }
+
++void __init arm64_enable_wa2_handling(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr,
++ int nr_inst)
++{
++ BUG_ON(nr_inst != 1);
++ /*
++ * Only allow mitigation on EL1 entry/exit and guest
++ * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
++ * be flipped.
++ */
++ if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
++ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
++}
++
+ static void arm64_set_ssbd_mitigation(bool state)
+ {
+ switch (psci_ops.conduit) {
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -100,6 +100,9 @@ alternative_else_nop_endif
+ // to save/restore them if required.
+ .macro apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
++alternative_cb arm64_enable_wa2_handling
++ b \targ
++alternative_cb_end
+ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
+ cbz \tmp2, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:14 +0100
+Subject: KVM: arm/arm64: Convert kvm_host_cpu_state to a static per-cpu allocation
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-4-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit 36989e7fd386a9a5822c48691473863f8fbb404d upstream.
+
+kvm_host_cpu_state is a per-cpu allocation made from kvm_arch_init()
+used to store the host EL1 registers when KVM switches to a guest.
+
+Make it easier for ASM to generate pointers into this per-cpu memory
+by making it a static allocation.
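+
+The change in a nutshell (illustrative snippets mirroring the hunks below):
+
+    /* Before: dynamic per-cpu allocation; the base lives in a pointer
+     * variable, which assembly cannot turn into an address with adr_l. */
+    static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
+    ctxt = this_cpu_ptr(kvm_host_cpu_state);
+
+    /* After: static per-cpu definition; the symbol has a link-time
+     * address, so EL2 assembly can reach it with adr_l plus an offset. */
+    DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+    ctxt = this_cpu_ptr(&kvm_host_cpu_state);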
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Acked-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kvm/arm.c | 18 +++---------------
+ 1 file changed, 3 insertions(+), 15 deletions(-)
+
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -51,8 +51,8 @@
+ __asm__(".arch_extension virt");
+ #endif
+
++DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
+ static unsigned long hyp_default_vectors;
+
+ /* Per-CPU variable containing the currently running vcpu. */
+@@ -338,7 +338,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
+ }
+
+ vcpu->cpu = cpu;
+- vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
++ vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
+
+ kvm_arm_set_running_vcpu(vcpu);
+ }
+@@ -1199,19 +1199,8 @@ static inline void hyp_cpu_pm_exit(void)
+ }
+ #endif
+
+-static void teardown_common_resources(void)
+-{
+- free_percpu(kvm_host_cpu_state);
+-}
+-
+ static int init_common_resources(void)
+ {
+- kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
+- if (!kvm_host_cpu_state) {
+- kvm_err("Cannot allocate host CPU state\n");
+- return -ENOMEM;
+- }
+-
+ /* set size of VMID supported by CPU */
+ kvm_vmid_bits = kvm_get_vmid_bits();
+ kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+@@ -1369,7 +1358,7 @@ static int init_hyp_mode(void)
+ for_each_possible_cpu(cpu) {
+ kvm_cpu_context_t *cpu_ctxt;
+
+- cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
++ cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
+ err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
+
+ if (err) {
+@@ -1447,7 +1436,6 @@ int kvm_arch_init(void *opaque)
+ out_hyp:
+ teardown_hyp_mode();
+ out_err:
+- teardown_common_resources();
+ return err;
+ }
+
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:19 +0100
+Subject: KVM: arm/arm64: Do not use kern_hyp_va() with kvm_vgic_global_state
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-9-marc.zyngier@arm.com>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+Commit 44a497abd621a71c645f06d3d545ae2f46448830 upstream.
+
+kvm_vgic_global_state is part of the read-only section, and is
+usually accessed using a PC-relative address generation (adrp + add).
+
+It is thus useless to use kern_hyp_va() on it, and actively problematic
+if kern_hyp_va() becomes non-idempotent. On the other hand, there is
+no way that the compiler is going to guarantee that such access is
+always PC relative.
+
+So let's bite the bullet and provide our own accessor.
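+
+The difference for a caller (illustrative only):
+
+    /* May be materialised as an absolute kernel VA (constant pool),
+     * which is not a valid address when running at EL2 without VHE:  */
+    base = &kvm_vgic_global_state;
+
+    /* Forces an adrp/add (PC-relative) computation, so the EL2 code
+     * gets an address it can actually dereference:                   */
+    base = hyp_symbol_addr(kvm_vgic_global_state);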
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: James Morse <james.morse@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_mmu.h | 7 +++++++
+ arch/arm64/include/asm/kvm_mmu.h | 20 ++++++++++++++++++++
+ virt/kvm/arm/hyp/vgic-v2-sr.c | 2 +-
+ 3 files changed, 28 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -28,6 +28,13 @@
+ */
+ #define kern_hyp_va(kva) (kva)
+
++/* Contrary to arm64, there is no need to generate a PC-relative address */
++#define hyp_symbol_addr(s) \
++ ({ \
++ typeof(s) *addr = &(s); \
++ addr; \
++ })
++
+ /*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
+ */
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -131,6 +131,26 @@ static inline unsigned long __kern_hyp_v
+ #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
+
+ /*
++ * Obtain the PC-relative address of a kernel symbol
++ * s: symbol
++ *
++ * The goal of this macro is to return a symbol's address based on a
++ * PC-relative computation, as opposed to loading the VA from a
++ * constant pool or something similar. This works well for HYP, as an
++ * absolute VA is guaranteed to be wrong. Only use this if trying to
++ * obtain the address of a symbol (i.e. not something you obtained by
++ * following a pointer).
++ */
++#define hyp_symbol_addr(s) \
++ ({ \
++ typeof(s) *addr; \
++ asm("adrp %0, %1\n" \
++ "add %0, %0, :lo12:%1\n" \
++ : "=r" (addr) : "S" (&s)); \
++ addr; \
++ })
++
++/*
+ * We currently only support a 40bit IPA.
+ */
+ #define KVM_PHYS_SHIFT (40)
+--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
++++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
+@@ -203,7 +203,7 @@ int __hyp_text __vgic_v2_perform_cpuif_a
+ return -1;
+
+ rd = kvm_vcpu_dabt_get_rd(vcpu);
+- addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
++ addr = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va);
+ addr += fault_ipa - vgic->vgic_cpu_base;
+
+ if (kvm_vcpu_dabt_iswrite(vcpu)) {
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:20 +0100
+Subject: KVM: arm64: Avoid storing the vcpu pointer on the stack
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-10-marc.zyngier@arm.com>
+
+From: Christoffer Dall <christoffer.dall@linaro.org>
+
+Commit 4464e210de9e80e38de59df052fe09ea2ff80b1b upstream.
+
+We already have the percpu area for the host cpu state, which points to
+the VCPU, so there's no need to store the VCPU pointer on the stack on
+every context switch. We can be a little more clever and just use
+tpidr_el2 for the percpu offset and load the VCPU pointer from the host
+context.
+
+This has the benefit of being able to retrieve the host context even
+when our stack is corrupted, and it has a potential performance benefit
+because we trade a store plus a load for an mrs and a load on a round
+trip to the guest.
+
+This does require us to calculate the percpu offset without including
+the offset from the kernel mapping of the percpu array to the linear
+mapping of the array (which is what we store in tpidr_el1), because a
+PC-relative generated address in EL2 is already giving us the hyp alias
+of the linear mapping of a kernel address. We do this in
+__cpu_init_hyp_mode() by using kvm_ksym_ref().
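+
+Put differently (a restatement of the two hunks below, not new code):
+
+    /* What __cpu_init_hyp_mode() now stores in tpidr_el2 ...          */
+    tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
+    	      - (u64)kvm_ksym_ref(kvm_host_cpu_state);
+
+    /* ... is exactly what the get_host_ctxt macro consumes at EL2:
+     *     adr_l \reg, kvm_host_cpu_state   (PC-relative, hyp-reachable)
+     *     mrs   \tmp, tpidr_el2
+     *     add   \reg, \reg, \tmp           (this CPU's host context)
+     */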
+
+The code that accesses ESR_EL2 was previously using an alternative to
+use the _EL1 accessor on VHE systems, but this was actually unnecessary
+as the _EL1 accessor aliases the ESR_EL2 register on VHE, and the _EL2
+accessor does the same thing on both systems.
+
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Andrew Jones <drjones@redhat.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_asm.h | 15 +++++++++++++++
+ arch/arm64/include/asm/kvm_host.h | 15 +++++++++++++++
+ arch/arm64/kernel/asm-offsets.c | 1 +
+ arch/arm64/kvm/hyp/entry.S | 6 +-----
+ arch/arm64/kvm/hyp/hyp-entry.S | 28 ++++++++++------------------
+ arch/arm64/kvm/hyp/switch.c | 5 +----
+ arch/arm64/kvm/hyp/sysreg-sr.c | 5 +++++
+ 7 files changed, 48 insertions(+), 27 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -33,6 +33,7 @@
+ #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
+ #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+
++/* Translate a kernel address of @sym into its equivalent linear mapping */
+ #define kvm_ksym_ref(sym) \
+ ({ \
+ void *val = &sym; \
+@@ -65,6 +66,20 @@ extern u32 __kvm_get_mdcr_el2(void);
+
+ extern u32 __init_stage2_translation(void);
+
++#else /* __ASSEMBLY__ */
++
++.macro get_host_ctxt reg, tmp
++ adr_l \reg, kvm_host_cpu_state
++ mrs \tmp, tpidr_el2
++ add \reg, \reg, \tmp
++.endm
++
++.macro get_vcpu_ptr vcpu, ctxt
++ get_host_ctxt \ctxt, \vcpu
++ ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
++ kern_hyp_va \vcpu
++.endm
++
+ #endif
+
+ #endif /* __ARM_KVM_ASM_H__ */
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -356,10 +356,15 @@ int kvm_perf_teardown(void);
+
+ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
++void __kvm_set_tpidr_el2(u64 tpidr_el2);
++DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
++
+ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+ unsigned long hyp_stack_ptr,
+ unsigned long vector_ptr)
+ {
++ u64 tpidr_el2;
++
+ /*
+ * Call initialization code, and switch to the full blown HYP code.
+ * If the cpucaps haven't been finalized yet, something has gone very
+@@ -368,6 +373,16 @@ static inline void __cpu_init_hyp_mode(p
+ */
+ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+ __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
++
++ /*
++ * Calculate the raw per-cpu offset without a translation from the
++ * kernel's mapping to the linear mapping, and store it in tpidr_el2
++ * so that we can use adr_l to access per-cpu variables in EL2.
++ */
++ tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
++ - (u64)kvm_ksym_ref(kvm_host_cpu_state);
++
++ kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
+ }
+
+ void __kvm_hyp_teardown(void);
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -132,6 +132,7 @@ int main(void)
+ DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
+ DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
+ DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
++ DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+ #endif
+ #ifdef CONFIG_CPU_PM
+ DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -62,9 +62,6 @@ ENTRY(__guest_enter)
+ // Store the host regs
+ save_callee_saved_regs x1
+
+- // Store host_ctxt and vcpu for use at exit time
+- stp x1, x0, [sp, #-16]!
+-
+ add x18, x0, #VCPU_CONTEXT
+
+ // Restore guest regs x0-x17
+@@ -118,8 +115,7 @@ ENTRY(__guest_exit)
+ // Store the guest regs x19-x29, lr
+ save_callee_saved_regs x1
+
+- // Restore the host_ctxt from the stack
+- ldr x2, [sp], #16
++ get_host_ctxt x2, x3
+
+ // Now restore the host regs
+ restore_callee_saved_regs x2
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -72,13 +72,8 @@ ENDPROC(__kvm_hyp_teardown)
+ el1_sync: // Guest trapped into EL2
+ stp x0, x1, [sp, #-16]!
+
+-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+- mrs x1, esr_el2
+-alternative_else
+- mrs x1, esr_el1
+-alternative_endif
+- lsr x0, x1, #ESR_ELx_EC_SHIFT
+-
++ mrs x0, esr_el2
++ lsr x0, x0, #ESR_ELx_EC_SHIFT
+ cmp x0, #ESR_ELx_EC_HVC64
+ ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
+ b.ne el1_trap
+@@ -118,10 +113,14 @@ el1_hvc_guest:
+ eret
+
+ el1_trap:
++ get_vcpu_ptr x1, x0
++
++ mrs x0, esr_el2
++ lsr x0, x0, #ESR_ELx_EC_SHIFT
+ /*
+ * x0: ESR_EC
++ * x1: vcpu pointer
+ */
+- ldr x1, [sp, #16 + 8] // vcpu stored by __guest_enter
+
+ /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+ cmp x0, #ESR_ELx_EC_FP_ASIMD
+@@ -132,13 +131,13 @@ el1_trap:
+
+ el1_irq:
+ stp x0, x1, [sp, #-16]!
+- ldr x1, [sp, #16 + 8]
++ get_vcpu_ptr x1, x0
+ mov x0, #ARM_EXCEPTION_IRQ
+ b __guest_exit
+
+ el1_error:
+ stp x0, x1, [sp, #-16]!
+- ldr x1, [sp, #16 + 8]
++ get_vcpu_ptr x1, x0
+ mov x0, #ARM_EXCEPTION_EL1_SERROR
+ b __guest_exit
+
+@@ -174,14 +173,7 @@ ENTRY(__hyp_do_panic)
+ ENDPROC(__hyp_do_panic)
+
+ ENTRY(__hyp_panic)
+- /*
+- * '=kvm_host_cpu_state' is a host VA from the constant pool, it may
+- * not be accessible by this address from EL2, hyp_panic() converts
+- * it with kern_hyp_va() before use.
+- */
+- ldr x0, =kvm_host_cpu_state
+- mrs x1, tpidr_el2
+- add x0, x0, x1
++ get_host_ctxt x0, x1
+ b hyp_panic
+ ENDPROC(__hyp_panic)
+
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -395,7 +395,7 @@ static hyp_alternate_select(__hyp_call_p
+ __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+-void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
++void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
+ {
+ struct kvm_vcpu *vcpu = NULL;
+
+@@ -404,9 +404,6 @@ void __hyp_text __noreturn hyp_panic(str
+ u64 par = read_sysreg(par_el1);
+
+ if (read_sysreg(vttbr_el2)) {
+- struct kvm_cpu_context *host_ctxt;
+-
+- host_ctxt = kern_hyp_va(__host_ctxt);
+ vcpu = host_ctxt->__hyp_running_vcpu;
+ __timer_save_state(vcpu);
+ __deactivate_traps(vcpu);
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -183,3 +183,8 @@ void __hyp_text __sysreg32_restore_state
+ if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+ }
++
++void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
++{
++ asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
++}
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:15 +0100
+Subject: KVM: arm64: Change hyp_panic()s dependency on tpidr_el2
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-5-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit c97e166e54b662717d20ec2e36761758d2b6a7c2 upstream.
+
+Make tpidr_el2 a cpu-offset for per-cpu variables in the same way the
+host uses tpidr_el1. This lets tpidr_el{1,2} have the same value, and
+on VHE they can be the same register.
+
+KVM calls hyp_panic() when anything unexpected happens. This may occur
+while a guest owns the EL1 registers. KVM stashes the vcpu pointer in
+tpidr_el2, which it uses to find the host context in order to restore
+the host EL1 registers before parachuting into the host's panic().
+
+The host context is a struct kvm_cpu_context allocated in the per-cpu
+area, and mapped to hyp. Given the per-cpu offset for this CPU, this is
+easy to find. Change hyp_panic() to take a pointer to the
+struct kvm_cpu_context. Wrap these calls with an asm function that
+retrieves the struct kvm_cpu_context from the host's per-cpu area.
+
+Copy the per-cpu offset from the host's tpidr_el1 into tpidr_el2 during
+kvm init. (Later patches will make this unnecessary for VHE hosts)
+
+We print out the vcpu pointer as part of the panic message. Add a back
+reference to the 'running vcpu' in the host cpu context to preserve this.
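+
+Condensed, the two sides of that back reference look like this (both
+appear in the hunks below):
+
+    /* __kvm_vcpu_run(): record which vcpu owns this CPU before entry. */
+    host_ctxt->__hyp_running_vcpu = vcpu;
+
+    /* hyp_panic(): the asm wrapper hands over the host context it found
+     * via tpidr_el2, and the back reference recovers the vcpu.         */
+    vcpu = host_ctxt->__hyp_running_vcpu;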
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h | 2 ++
+ arch/arm64/kvm/hyp/hyp-entry.S | 12 ++++++++++++
+ arch/arm64/kvm/hyp/s2-setup.c | 3 +++
+ arch/arm64/kvm/hyp/switch.c | 25 +++++++++++++------------
+ 4 files changed, 30 insertions(+), 12 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -197,6 +197,8 @@ struct kvm_cpu_context {
+ u64 sys_regs[NR_SYS_REGS];
+ u32 copro[NR_COPRO_REGS];
+ };
++
++ struct kvm_vcpu *__hyp_running_vcpu;
+ };
+
+ typedef struct kvm_cpu_context kvm_cpu_context_t;
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -173,6 +173,18 @@ ENTRY(__hyp_do_panic)
+ eret
+ ENDPROC(__hyp_do_panic)
+
++ENTRY(__hyp_panic)
++ /*
++ * '=kvm_host_cpu_state' is a host VA from the constant pool, it may
++ * not be accessible by this address from EL2, hyp_panic() converts
++ * it with kern_hyp_va() before use.
++ */
++ ldr x0, =kvm_host_cpu_state
++ mrs x1, tpidr_el2
++ add x0, x0, x1
++ b hyp_panic
++ENDPROC(__hyp_panic)
++
+ .macro invalid_vector label, target = __hyp_panic
+ .align 2
+ \label:
+--- a/arch/arm64/kvm/hyp/s2-setup.c
++++ b/arch/arm64/kvm/hyp/s2-setup.c
+@@ -84,5 +84,8 @@ u32 __hyp_text __init_stage2_translation
+
+ write_sysreg(val, vtcr_el2);
+
++ /* copy tpidr_el1 into tpidr_el2 for use by HYP */
++ write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
++
+ return parange;
+ }
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -275,9 +275,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm
+ u64 exit_code;
+
+ vcpu = kern_hyp_va(vcpu);
+- write_sysreg(vcpu, tpidr_el2);
+
+ host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
++ host_ctxt->__hyp_running_vcpu = vcpu;
+ guest_ctxt = &vcpu->arch.ctxt;
+
+ __sysreg_save_host_state(host_ctxt);
+@@ -364,7 +364,8 @@ again:
+
+ static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
+
+-static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
++static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
++ struct kvm_vcpu *vcpu)
+ {
+ unsigned long str_va;
+
+@@ -378,35 +379,35 @@ static void __hyp_text __hyp_call_panic_
+ __hyp_do_panic(str_va,
+ spsr, elr,
+ read_sysreg(esr_el2), read_sysreg_el2(far),
+- read_sysreg(hpfar_el2), par,
+- (void *)read_sysreg(tpidr_el2));
++ read_sysreg(hpfar_el2), par, vcpu);
+ }
+
+-static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
++static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
++ struct kvm_vcpu *vcpu)
+ {
+ panic(__hyp_panic_string,
+ spsr, elr,
+ read_sysreg_el2(esr), read_sysreg_el2(far),
+- read_sysreg(hpfar_el2), par,
+- (void *)read_sysreg(tpidr_el2));
++ read_sysreg(hpfar_el2), par, vcpu);
+ }
+
+ static hyp_alternate_select(__hyp_call_panic,
+ __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+-void __hyp_text __noreturn __hyp_panic(void)
++void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
+ {
++ struct kvm_vcpu *vcpu = NULL;
++
+ u64 spsr = read_sysreg_el2(spsr);
+ u64 elr = read_sysreg_el2(elr);
+ u64 par = read_sysreg(par_el1);
+
+ if (read_sysreg(vttbr_el2)) {
+- struct kvm_vcpu *vcpu;
+ struct kvm_cpu_context *host_ctxt;
+
+- vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
+- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
++ host_ctxt = kern_hyp_va(__host_ctxt);
++ vcpu = host_ctxt->__hyp_running_vcpu;
+ __timer_save_state(vcpu);
+ __deactivate_traps(vcpu);
+ __deactivate_vm(vcpu);
+@@ -414,7 +415,7 @@ void __hyp_text __noreturn __hyp_panic(v
+ }
+
+ /* Call panic for real */
+- __hyp_call_panic()(spsr, elr, par);
++ __hyp_call_panic()(spsr, elr, par, vcpu);
+
+ unreachable();
+ }
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:17 +0100
+Subject: KVM: arm64: Stop save/restoring host tpidr_el1 on VHE
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-7-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit 1f742679c33bc083722cb0b442a95d458c491b56 upstream.
+
+Now that a VHE host uses tpidr_el2 for the cpu offset we no longer
+need KVM to save/restore tpidr_el1. Move this from the 'common' code
+into the non-vhe code. While we're at it, on VHE we don't need to
+save the ELR or SPSR as kernel_entry in entry.S will have pushed these
+onto the kernel stack, and will restore them from there. Move these
+to the non-vhe code as we need them to get back to the host.
+
+Finally, remove the always-copy-tpidr we hid in the stage2 setup
+code; cpufeature's enable callback will do this for VHE, so we only
+need KVM to do it for non-vhe. Add the copy into kvm-init instead.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp-init.S | 4 ++++
+ arch/arm64/kvm/hyp/s2-setup.c | 3 ---
+ arch/arm64/kvm/hyp/sysreg-sr.c | 16 ++++++++--------
+ 3 files changed, 12 insertions(+), 11 deletions(-)
+
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -118,6 +118,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
+ kern_hyp_va x2
+ msr vbar_el2, x2
+
++ /* copy tpidr_el1 into tpidr_el2 for use by HYP */
++ mrs x1, tpidr_el1
++ msr tpidr_el2, x1
++
+ /* Hello, World! */
+ eret
+ ENDPROC(__kvm_hyp_init)
+--- a/arch/arm64/kvm/hyp/s2-setup.c
++++ b/arch/arm64/kvm/hyp/s2-setup.c
+@@ -84,8 +84,5 @@ u32 __hyp_text __init_stage2_translation
+
+ write_sysreg(val, vtcr_el2);
+
+- /* copy tpidr_el1 into tpidr_el2 for use by HYP */
+- write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+-
+ return parange;
+ }
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothi
+ /*
+ * Non-VHE: Both host and guest must save everything.
+ *
+- * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
+- * pstate, and guest must save everything.
++ * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
++ * and guest must save everything.
+ */
+
+ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+@@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_com
+ ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
+ ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
+ ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
+- ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+ ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
+ ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
+- ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
+- ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
+ }
+
+ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+@@ -62,10 +59,13 @@ static void __hyp_text __sysreg_save_sta
+ ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
+ ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
+ ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
++ ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+
+ ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
+ ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
+ ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
++ ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
++ ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
+ }
+
+ static hyp_alternate_select(__sysreg_call_save_host_state,
+@@ -89,11 +89,8 @@ static void __hyp_text __sysreg_restore_
+ write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
+ write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+- write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+ write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
+ write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
+- write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
+- write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+ }
+
+ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+@@ -115,10 +112,13 @@ static void __hyp_text __sysreg_restore_
+ write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
+ write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
+ write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
++ write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+
+ write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
+ write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
+ write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
++ write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
++ write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+ }
+
+ static hyp_alternate_select(__sysreg_call_restore_host_state,
--- /dev/null
+From foo@baz Fri Jul 20 12:10:19 CEST 2018
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 20 Jul 2018 10:56:13 +0100
+Subject: KVM: arm64: Store vcpu on the stack during __guest_enter()
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Mark Rutland <mark.rutland@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20180720095634.2173-3-marc.zyngier@arm.com>
+
+From: James Morse <james.morse@arm.com>
+
+Commit 32b03d1059667a39e089c45ee38ec9c16332430f upstream.
+
+KVM uses tpidr_el2 as its private vcpu register, which makes sense for
+non-vhe world switch as only KVM can access this register. This means
+vhe Linux has to use tpidr_el1, which KVM has to save/restore as part
+of the host context.
+
+If the SDEI handler code runs behind KVM's back, it mustn't access any
+per-cpu variables. To allow this on systems with vhe we need to make
+the host use tpidr_el2, sparing KVM from having to save/restore it.
+
+__guest_enter() stores the host_ctxt on the stack, do the same with
+the vcpu.
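+
+For orientation, the stack layout this relies on once an exception has
+been taken from the guest (derived from the hunks below) is:
+
+    sp + 24 : vcpu        stored by __guest_enter ("stp x1, x0, [sp, #-16]!")
+    sp + 16 : host_ctxt   stored by __guest_enter
+    sp +  8 : x1          pushed again on exception entry
+    sp +  0 : x0
+
+so "ldr x1, [sp, #16 + 8]" in the vectors recovers the vcpu pointer.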
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp/entry.S | 10 +++++++---
+ arch/arm64/kvm/hyp/hyp-entry.S | 6 +++---
+ 2 files changed, 10 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -62,8 +62,8 @@ ENTRY(__guest_enter)
+ // Store the host regs
+ save_callee_saved_regs x1
+
+- // Store the host_ctxt for use at exit time
+- str x1, [sp, #-16]!
++ // Store host_ctxt and vcpu for use at exit time
++ stp x1, x0, [sp, #-16]!
+
+ add x18, x0, #VCPU_CONTEXT
+
+@@ -159,6 +159,10 @@ abort_guest_exit_end:
+ ENDPROC(__guest_exit)
+
+ ENTRY(__fpsimd_guest_restore)
++ // x0: esr
++ // x1: vcpu
++ // x2-x29,lr: vcpu regs
++ // vcpu x0-x1 on the stack
+ stp x2, x3, [sp, #-16]!
+ stp x4, lr, [sp, #-16]!
+
+@@ -173,7 +177,7 @@ alternative_else
+ alternative_endif
+ isb
+
+- mrs x3, tpidr_el2
++ mov x3, x1
+
+ ldr x0, [x3, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x0
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -121,24 +121,24 @@ el1_trap:
+ /*
+ * x0: ESR_EC
+ */
++ ldr x1, [sp, #16 + 8] // vcpu stored by __guest_enter
+
+ /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+ cmp x0, #ESR_ELx_EC_FP_ASIMD
+ b.eq __fpsimd_guest_restore
+
+- mrs x1, tpidr_el2
+ mov x0, #ARM_EXCEPTION_TRAP
+ b __guest_exit
+
+ el1_irq:
+ stp x0, x1, [sp, #-16]!
+- mrs x1, tpidr_el2
++ ldr x1, [sp, #16 + 8]
+ mov x0, #ARM_EXCEPTION_IRQ
+ b __guest_exit
+
+ el1_error:
+ stp x0, x1, [sp, #-16]!
+- mrs x1, tpidr_el2
++ ldr x1, [sp, #16 + 8]
+ mov x0, #ARM_EXCEPTION_EL1_SERROR
+ b __guest_exit
+
netfilter-ipv6-nf_defrag-drop-skb-dst-before-queueing.patch
rds-avoid-unenecessary-cong_update-in-loop-transport.patch
net-nfc-avoid-stalls-when-nfc_alloc_send_skb-returned-null.patch
+arm64-assembler-introduce-ldr_this_cpu.patch
+kvm-arm64-store-vcpu-on-the-stack-during-__guest_enter.patch
+kvm-arm-arm64-convert-kvm_host_cpu_state-to-a-static-per-cpu-allocation.patch
+kvm-arm64-change-hyp_panic-s-dependency-on-tpidr_el2.patch
+arm64-alternatives-use-tpidr_el2-on-vhe-hosts.patch
+kvm-arm64-stop-save-restoring-host-tpidr_el1-on-vhe.patch
+arm64-alternatives-add-dynamic-patching-feature.patch
+kvm-arm-arm64-do-not-use-kern_hyp_va-with-kvm_vgic_global_state.patch
+kvm-arm64-avoid-storing-the-vcpu-pointer-on-the-stack.patch
+arm-arm64-smccc-add-smccc-specific-return-codes.patch
+arm64-call-arch_workaround_2-on-transitions-between-el0-and-el1.patch
+arm64-add-per-cpu-infrastructure-to-call-arch_workaround_2.patch
+arm64-add-arch_workaround_2-probing.patch
+arm64-add-ssbd-command-line-option.patch
+arm64-ssbd-add-global-mitigation-state-accessor.patch
+arm64-ssbd-skip-apply_ssbd-if-not-using-dynamic-mitigation.patch
+arm64-ssbd-restore-mitigation-status-on-cpu-resume.patch
+arm64-ssbd-introduce-thread-flag-to-control-userspace-mitigation.patch
+arm64-ssbd-add-prctl-interface-for-per-thread-mitigation.patch
+arm64-kvm-add-hyp-per-cpu-accessors.patch
+arm64-kvm-add-arch_workaround_2-support-for-guests.patch
+arm64-kvm-handle-guest-s-arch_workaround_2-requests.patch
+arm64-kvm-add-arch_workaround_2-discovery-through-arch_features_func_id.patch