--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 8 May 2018 15:43:45 +0200
+Subject: Documentation/spec_ctrl: Do some minor cleanups
+
+From: Borislav Petkov <bp@suse.de>
+
+commit dd0792699c4058e63c0715d9a7c2d40226fcdddc upstream
+
+Fix some typos, improve formulations, end sentences with a full stop.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/spec_ctrl.txt | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/Documentation/spec_ctrl.txt
++++ b/Documentation/spec_ctrl.txt
+@@ -2,13 +2,13 @@
+ Speculation Control
+ ===================
+
+-Quite some CPUs have speculation related misfeatures which are in fact
+-vulnerabilites causing data leaks in various forms even accross privilege
+-domains.
++Quite some CPUs have speculation-related misfeatures which are in
++fact vulnerabilities causing data leaks in various forms even across
++privilege domains.
+
+ The kernel provides mitigation for such vulnerabilities in various
+-forms. Some of these mitigations are compile time configurable and some on
+-the kernel command line.
++forms. Some of these mitigations are compile-time configurable and some
++can be supplied on the kernel command line.
+
+ There is also a class of mitigations which are very expensive, but they can
+ be restricted to a certain set of processes or tasks in controlled
+@@ -32,18 +32,18 @@ the following meaning:
+ Bit Define Description
+ ==== ===================== ===================================================
+ 0 PR_SPEC_PRCTL Mitigation can be controlled per task by
+- PR_SET_SPECULATION_CTRL
++ PR_SET_SPECULATION_CTRL.
+ 1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
+- disabled
++ disabled.
+ 2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
+- enabled
++ enabled.
+ 3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
+ subsequent prctl(..., PR_SPEC_ENABLE) will fail.
+ ==== ===================== ===================================================
+
+ If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+-If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
++If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
+ available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+ misfeature will fail.
+
+@@ -61,9 +61,9 @@ Common error codes
+ Value Meaning
+ ======= =================================================================
+ EINVAL The prctl is not implemented by the architecture or unused
+- prctl(2) arguments are not 0
++ prctl(2) arguments are not 0.
+
+-ENODEV arg2 is selecting a not supported speculation misfeature
++ENODEV arg2 is selecting a not supported speculation misfeature.
+ ======= =================================================================
+
+ PR_SET_SPECULATION_CTRL error codes
+@@ -74,7 +74,7 @@ Value Meaning
+ 0 Success
+
+ ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+- PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE
++ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
+
+ ENXIO Control of the selected speculation misfeature is not possible.
+ See PR_GET_SPECULATION_CTRL.
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 10 May 2018 22:06:39 +0200
+Subject: KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit bc226f07dcd3c9ef0b7f6236fe356ea4a9cb4769 upstream
+
+Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
+speculative store bypass disable (SSBD) under SVM. This will allow guests
+to use SSBD on hardware that uses non-architectural mechanisms for enabling
+SSBD.
+
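+For illustration, a guest kernel which sees VIRT_SSBD in CPUID 0x80000008
+EBX can then request SSBD with a plain MSR write (guest-side sketch; only
+the SPEC_CTRL_SSBD bit is accepted by the host):
+
+        wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+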
+[ tglx: Folded the migration fixup from Paolo Bonzini ]
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kernel/cpu/common.c | 3 ++-
+ arch/x86/kvm/cpuid.c | 11 +++++++++--
+ arch/x86/kvm/cpuid.h | 9 +++++++++
+ arch/x86/kvm/svm.c | 21 +++++++++++++++++++--
+ arch/x86/kvm/vmx.c | 18 +++++++++++++++---
+ arch/x86/kvm/x86.c | 13 ++++---------
+ 7 files changed, 59 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -864,7 +864,7 @@ struct kvm_x86_ops {
+ int (*hardware_setup)(void); /* __init */
+ void (*hardware_unsetup)(void); /* __exit */
+ bool (*cpu_has_accelerated_tpr)(void);
+- bool (*cpu_has_high_real_mode_segbase)(void);
++ bool (*has_emulated_msr)(int index);
+ void (*cpuid_update)(struct kvm_vcpu *vcpu);
+
+ int (*vm_init)(struct kvm *kvm);
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -735,7 +735,8 @@ static void init_speculation_control(str
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
+- if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
++ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
++ cpu_has(c, X86_FEATURE_VIRT_SSBD))
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -357,7 +357,7 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(AMD_IBPB) | F(AMD_IBRS);
++ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
+@@ -618,13 +618,20 @@ static inline int __do_cpuid_ent(struct
+ g_phys_as = phys_as;
+ entry->eax = g_phys_as | (virt_as << 8);
+ entry->edx = 0;
+- /* IBRS and IBPB aren't necessarily present in hardware cpuid */
++ /*
++ * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
++ * hardware cpuid
++ */
+ if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+ entry->ebx |= F(AMD_IBPB);
+ if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+ entry->ebx |= F(AMD_IBRS);
++ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
++ entry->ebx |= F(VIRT_SSBD);
+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
++ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ entry->ebx |= F(VIRT_SSBD);
+ break;
+ }
+ case 0x80000019:
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -190,6 +190,15 @@ static inline bool guest_cpuid_has_arch_
+ return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
+ }
+
++static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
++{
++ struct kvm_cpuid_entry2 *best;
++
++ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
++ return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
++}
++
++
+
+ /*
+ * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3557,6 +3557,13 @@ static int svm_get_msr(struct kvm_vcpu *
+
+ msr_info->data = svm->spec_ctrl;
+ break;
++ case MSR_AMD64_VIRT_SPEC_CTRL:
++ if (!msr_info->host_initiated &&
++ !guest_cpuid_has_virt_ssbd(vcpu))
++ return 1;
++
++ msr_info->data = svm->virt_spec_ctrl;
++ break;
+ case MSR_IA32_UCODE_REV:
+ msr_info->data = 0x01000065;
+ break;
+@@ -3691,6 +3698,16 @@ static int svm_set_msr(struct kvm_vcpu *
+ break;
+ set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
+ break;
++ case MSR_AMD64_VIRT_SPEC_CTRL:
++ if (!msr->host_initiated &&
++ !guest_cpuid_has_virt_ssbd(vcpu))
++ return 1;
++
++ if (data & ~SPEC_CTRL_SSBD)
++ return 1;
++
++ svm->virt_spec_ctrl = data;
++ break;
+ case MSR_STAR:
+ svm->vmcb->save.star = data;
+ break;
+@@ -5150,7 +5167,7 @@ static bool svm_cpu_has_accelerated_tpr(
+ return false;
+ }
+
+-static bool svm_has_high_real_mode_segbase(void)
++static bool svm_has_emulated_msr(int index)
+ {
+ return true;
+ }
+@@ -5467,7 +5484,7 @@ static struct kvm_x86_ops svm_x86_ops __
+ .hardware_enable = svm_hardware_enable,
+ .hardware_disable = svm_hardware_disable,
+ .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+- .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
++ .has_emulated_msr = svm_has_emulated_msr,
+
+ .vcpu_create = svm_create_vcpu,
+ .vcpu_free = svm_free_vcpu,
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8691,9 +8691,21 @@ static void vmx_handle_external_intr(str
+ }
+ }
+
+-static bool vmx_has_high_real_mode_segbase(void)
++static bool vmx_has_emulated_msr(int index)
+ {
+- return enable_unrestricted_guest || emulate_invalid_guest_state;
++ switch (index) {
++ case MSR_IA32_SMBASE:
++ /*
++ * We cannot do SMM unless we can run the guest in big
++ * real mode.
++ */
++ return enable_unrestricted_guest || emulate_invalid_guest_state;
++ case MSR_AMD64_VIRT_SPEC_CTRL:
++ /* This is AMD only. */
++ return false;
++ default:
++ return true;
++ }
+ }
+
+ static bool vmx_mpx_supported(void)
+@@ -11346,7 +11358,7 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .hardware_enable = hardware_enable,
+ .hardware_disable = hardware_disable,
+ .cpu_has_accelerated_tpr = report_flexpriority,
+- .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
++ .has_emulated_msr = vmx_has_emulated_msr,
+
+ .vcpu_create = vmx_create_vcpu,
+ .vcpu_free = vmx_free_vcpu,
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1002,6 +1002,7 @@ static u32 emulated_msrs[] = {
+ MSR_IA32_MCG_CTL,
+ MSR_IA32_MCG_EXT_CTL,
+ MSR_IA32_SMBASE,
++ MSR_AMD64_VIRT_SPEC_CTRL,
+ };
+
+ static unsigned num_emulated_msrs;
+@@ -2664,7 +2665,7 @@ int kvm_vm_ioctl_check_extension(struct
+ * fringe case that is not enabled except via specific settings
+ * of the module parameters.
+ */
+- r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
++ r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
+ break;
+ case KVM_CAP_COALESCED_MMIO:
+ r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+@@ -4226,14 +4227,8 @@ static void kvm_init_msr_list(void)
+ num_msrs_to_save = j;
+
+ for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
+- switch (emulated_msrs[i]) {
+- case MSR_IA32_SMBASE:
+- if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
+- continue;
+- break;
+- default:
+- break;
+- }
++ if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
++ continue;
+
+ if (j < i)
+ emulated_msrs[j] = emulated_msrs[i];
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 11 May 2018 15:21:01 +0200
+Subject: KVM: SVM: Move spec control call after restore of GS
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 15e6c22fd8e5a42c5ed6d487b7c9fe44c2517765 upstream
+
+svm_vcpu_run() invokes x86_spec_ctrl_restore_host() after VMEXIT, but
+before the host GS is restored. x86_spec_ctrl_restore_host() uses 'current'
+to determine the host SSBD state of the thread. 'current' is GS based, but
+host GS is not yet restored and the access causes a triple fault.
+
+Move the call after the host GS restore.
+
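+The resulting order after VMEXIT is, in sketch form:
+
+        wrmsrl(MSR_GS_BASE, svm->host.gs_base);     /* host GS restored first */
+        x86_spec_ctrl_restore_host(svm->spec_ctrl); /* may access 'current'   */
+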
+Fixes: 885f82bfbc6f ("x86/process: Allow runtime control of Speculative Store Bypass")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5011,6 +5011,18 @@ static void svm_vcpu_run(struct kvm_vcpu
+ #endif
+ );
+
++ /* Eliminate branch target predictions from guest mode */
++ vmexit_fill_RSB();
++
++#ifdef CONFIG_X86_64
++ wrmsrl(MSR_GS_BASE, svm->host.gs_base);
++#else
++ loadsegment(fs, svm->host.fs);
++#ifndef CONFIG_X86_32_LAZY_GS
++ loadsegment(gs, svm->host.gs);
++#endif
++#endif
++
+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+@@ -5031,18 +5043,6 @@ static void svm_vcpu_run(struct kvm_vcpu
+
+ x86_spec_ctrl_restore_host(svm->spec_ctrl);
+
+- /* Eliminate branch target predictions from guest mode */
+- vmexit_fill_RSB();
+-
+-#ifdef CONFIG_X86_64
+- wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+-#else
+- loadsegment(fs, svm->host.fs);
+-#ifndef CONFIG_X86_32_LAZY_GS
+- loadsegment(gs, svm->host.gs);
+-#endif
+-#endif
+-
+ reload_tss(vcpu);
+
+ local_irq_disable();
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 1 May 2018 15:19:04 -0700
+Subject: nospec: Allow getting/setting on non-current task
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 7bbf1373e228840bb0295a2ca26d548ef37f448e upstream
+
+Adjust arch_prctl_get/set_spec_ctrl() to operate on tasks other than
+current.
+
+This is needed both for /proc/$pid/status queries and for seccomp (since
+thread-syncing can trigger seccomp in non-current threads).
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++-----------
+ include/linux/nospec.h | 7 +++++--
+ kernel/sys.c | 9 +++++----
+ 3 files changed, 26 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -529,31 +529,35 @@ static void ssb_select_mitigation()
+
+ #undef pr_fmt
+
+-static int ssb_prctl_set(unsigned long ctrl)
++static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+- bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
++ bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
+
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
+ return -ENXIO;
+
+ if (ctrl == PR_SPEC_ENABLE)
+- clear_tsk_thread_flag(current, TIF_RDS);
++ clear_tsk_thread_flag(task, TIF_RDS);
+ else
+- set_tsk_thread_flag(current, TIF_RDS);
++ set_tsk_thread_flag(task, TIF_RDS);
+
+- if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
++ /*
++ * If being set on non-current task, delay setting the CPU
++ * mitigation until it is next scheduled.
++ */
++ if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
+ speculative_store_bypass_update();
+
+ return 0;
+ }
+
+-static int ssb_prctl_get(void)
++static int ssb_prctl_get(struct task_struct *task)
+ {
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
+ case SPEC_STORE_BYPASS_PRCTL:
+- if (test_tsk_thread_flag(current, TIF_RDS))
++ if (test_tsk_thread_flag(task, TIF_RDS))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ default:
+@@ -563,24 +567,25 @@ static int ssb_prctl_get(void)
+ }
+ }
+
+-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl)
+ {
+ if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
+ return -ERANGE;
+
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+- return ssb_prctl_set(ctrl);
++ return ssb_prctl_set(task, ctrl);
+ default:
+ return -ENODEV;
+ }
+ }
+
+-int arch_prctl_spec_ctrl_get(unsigned long which)
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+ {
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+- return ssb_prctl_get();
++ return ssb_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -7,6 +7,8 @@
+ #define _LINUX_NOSPEC_H
+ #include <asm/barrier.h>
+
++struct task_struct;
++
+ /**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+@@ -57,7 +59,8 @@ static inline unsigned long array_index_
+ })
+
+ /* Speculation control prctl */
+-int arch_prctl_spec_ctrl_get(unsigned long which);
+-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl);
+
+ #endif /* _LINUX_NOSPEC_H */
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -2074,12 +2074,13 @@ static int prctl_get_tid_address(struct
+ }
+ #endif
+
+-int __weak arch_prctl_spec_ctrl_get(unsigned long which)
++int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
+ {
+ return -EINVAL;
+ }
+
+-int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
++ unsigned long ctrl)
+ {
+ return -EINVAL;
+ }
+@@ -2285,12 +2286,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
+ case PR_GET_SPECULATION_CTRL:
+ if (arg3 || arg4 || arg5)
+ return -EINVAL;
+- error = arch_prctl_spec_ctrl_get(arg2);
++ error = arch_prctl_spec_ctrl_get(me, arg2);
+ break;
+ case PR_SET_SPECULATION_CTRL:
+ if (arg4 || arg5)
+ return -EINVAL;
+- error = arch_prctl_spec_ctrl_set(arg2, arg3);
++ error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
+ break;
+ default:
+ error = -EINVAL;
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 3 May 2018 22:09:15 +0200
+Subject: prctl: Add force disable speculation
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 356e4bfff2c5489e016fdb925adbf12a1e3950ee upstream
+
+For certain use cases it is desired to enforce mitigations so they cannot
+be undone afterwards. That's important for loader stubs which want to
+prevent a child from disabling the mitigation again. This will also be used
+for seccomp(). The extra preservation of the prctl state for SSB is a
+preparatory step for eBPF dynamic speculation control.
+
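+A minimal loader-stub sketch of the intended usage (assuming the prctl(2)
+wrapper; the execve() arguments are placeholders):
+
+        prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+              PR_SPEC_FORCE_DISABLE, 0, 0);
+        /* From here on PR_SPEC_ENABLE fails with EPERM, also for children */
+        execve(prog, argv, envp);
+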
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/spec_ctrl.txt | 34 +++++++++++++++++++++-------------
+ arch/x86/kernel/cpu/bugs.c | 35 +++++++++++++++++++++++++----------
+ fs/proc/array.c | 3 +++
+ include/linux/sched.h | 9 +++++++++
+ include/uapi/linux/prctl.h | 1 +
+ 5 files changed, 59 insertions(+), 23 deletions(-)
+
+--- a/Documentation/spec_ctrl.txt
++++ b/Documentation/spec_ctrl.txt
+@@ -25,19 +25,21 @@ PR_GET_SPECULATION_CTRL
+ -----------------------
+
+ PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+-which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
++which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
+ the following meaning:
+
+-==== ================ ===================================================
+-Bit Define Description
+-==== ================ ===================================================
+-0 PR_SPEC_PRCTL Mitigation can be controlled per task by
+- PR_SET_SPECULATION_CTRL
+-1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
+- disabled
+-2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
+- enabled
+-==== ================ ===================================================
++==== ===================== ===================================================
++Bit Define Description
++==== ===================== ===================================================
++0 PR_SPEC_PRCTL Mitigation can be controlled per task by
++ PR_SET_SPECULATION_CTRL
++1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
++ disabled
++2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
++ enabled
++3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
++ subsequent prctl(..., PR_SPEC_ENABLE) will fail.
++==== ===================== ===================================================
+
+ If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+@@ -47,9 +49,11 @@ misfeature will fail.
+
+ PR_SET_SPECULATION_CTRL
+ -----------------------
++
+ PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
+ is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
+-in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
++in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
++PR_SPEC_FORCE_DISABLE.
+
+ Common error codes
+ ------------------
+@@ -70,10 +74,13 @@ Value Meaning
+ 0 Success
+
+ ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+- PR_SPEC_DISABLE
++ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE
+
+ ENXIO Control of the selected speculation misfeature is not possible.
+ See PR_GET_SPECULATION_CTRL.
++
++EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
++ tried to enable it again.
+ ======= =================================================================
+
+ Speculation misfeature controls
+@@ -84,3 +91,4 @@ Speculation misfeature controls
+ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -532,21 +532,37 @@ static void ssb_select_mitigation()
+
+ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+- bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
++ bool update;
+
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
+ return -ENXIO;
+
+- if (ctrl == PR_SPEC_ENABLE)
+- clear_tsk_thread_flag(task, TIF_RDS);
+- else
+- set_tsk_thread_flag(task, TIF_RDS);
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ /* If speculation is force disabled, enable is not allowed */
++ if (task_spec_ssb_force_disable(task))
++ return -EPERM;
++ task_clear_spec_ssb_disable(task);
++ update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
++ break;
++ case PR_SPEC_DISABLE:
++ task_set_spec_ssb_disable(task);
++ update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ break;
++ case PR_SPEC_FORCE_DISABLE:
++ task_set_spec_ssb_disable(task);
++ task_set_spec_ssb_force_disable(task);
++ update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ break;
++ default:
++ return -ERANGE;
++ }
+
+ /*
+ * If being set on non-current task, delay setting the CPU
+ * mitigation until it is next scheduled.
+ */
+- if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
++ if (task == current && update)
+ speculative_store_bypass_update();
+
+ return 0;
+@@ -558,7 +574,9 @@ static int ssb_prctl_get(struct task_str
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
+ case SPEC_STORE_BYPASS_PRCTL:
+- if (test_tsk_thread_flag(task, TIF_RDS))
++ if (task_spec_ssb_force_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
++ if (task_spec_ssb_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ default:
+@@ -571,9 +589,6 @@ static int ssb_prctl_get(struct task_str
+ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
+ {
+- if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
+- return -ERANGE;
+-
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssb_prctl_set(task, ctrl);
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -355,6 +355,9 @@ static inline void task_seccomp(struct s
+ case PR_SPEC_NOT_AFFECTED:
+ seq_printf(m, "not vulnerable");
+ break;
++ case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
++ seq_printf(m, "thread force mitigated");
++ break;
+ case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
+ seq_printf(m, "thread mitigated");
+ break;
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2354,6 +2354,8 @@ static inline void memalloc_noio_restore
+ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
+ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+ #define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
++#define PFA_SPEC_SSB_DISABLE 4 /* Speculative Store Bypass disabled */
++#define PFA_SPEC_SSB_FORCE_DISABLE 5 /* Speculative Store Bypass force disabled*/
+
+
+ #define TASK_PFA_TEST(name, func) \
+@@ -2380,6 +2382,13 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+ TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
+ TASK_PFA_SET(LMK_WAITING, lmk_waiting)
+
++TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
++TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
++TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
++
++TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
++TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
++
+ /*
+ * task->jobctl flags
+ */
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -207,5 +207,6 @@ struct prctl_mm_map {
+ # define PR_SPEC_PRCTL (1UL << 0)
+ # define PR_SPEC_ENABLE (1UL << 1)
+ # define PR_SPEC_DISABLE (1UL << 2)
++# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+
+ #endif /* _LINUX_PRCTL_H */
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:20:11 +0200
+Subject: prctl: Add speculation control prctls
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit b617cfc858161140d69cc0b5cc211996b557a1c7 upstream
+
+Add two new prctls to control aspects of speculation-related vulnerabilities
+and their mitigations, providing finer-grained control over
+performance-impacting mitigations.
+
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
+the following meaning:
+
+Bit Define Description
+0 PR_SPEC_PRCTL Mitigation can be controlled per task by
+ PR_SET_SPECULATION_CTRL
+1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
+ disabled
+2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
+ enabled
+
+If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+misfeature will fail.
+
+PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
+is selected by arg2 of prctl(2) per task. arg3 is used to hand in the
+control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
+
+The common return values are:
+
+EINVAL prctl is not implemented by the architecture or the unused prctl()
+ arguments are not 0
+ENODEV arg2 is selecting an unsupported speculation misfeature
+
+PR_SET_SPECULATION_CTRL has these additional return values:
+
+ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor PR_SPEC_DISABLE
+ENXIO prctl control of the selected speculation misfeature is disabled
+
+The first supported controllable speculation misfeature is
+PR_SPEC_STORE_BYPASS. Add the define so this can be shared between
+architectures.
+
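+A minimal userspace sketch of the interface (assuming the glibc prctl(2)
+wrapper and the defines added here):
+
+        int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+
+        if (state > 0 && (state & PR_SPEC_PRCTL))
+                prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+                      PR_SPEC_DISABLE, 0, 0);
+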
+Based on an initial patch from Tim Chen and mostly rewritten.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/spec_ctrl.txt | 86 ++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/nospec.h | 5 ++
+ include/uapi/linux/prctl.h | 11 +++++
+ kernel/sys.c | 22 +++++++++++
+ 4 files changed, 124 insertions(+)
+ create mode 100644 Documentation/spec_ctrl.txt
+
+--- /dev/null
++++ b/Documentation/spec_ctrl.txt
+@@ -0,0 +1,86 @@
++===================
++Speculation Control
++===================
++
++Quite some CPUs have speculation related misfeatures which are in fact
++vulnerabilites causing data leaks in various forms even accross privilege
++domains.
++
++The kernel provides mitigation for such vulnerabilities in various
++forms. Some of these mitigations are compile time configurable and some on
++the kernel command line.
++
++There is also a class of mitigations which are very expensive, but they can
++be restricted to a certain set of processes or tasks in controlled
++environments. The mechanism to control these mitigations is via
++:manpage:`prctl(2)`.
++
++There are two prctl options which are related to this:
++
++ * PR_GET_SPECULATION_CTRL
++
++ * PR_SET_SPECULATION_CTRL
++
++PR_GET_SPECULATION_CTRL
++-----------------------
++
++PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
++which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
++the following meaning:
++
++==== ================ ===================================================
++Bit Define Description
++==== ================ ===================================================
++0 PR_SPEC_PRCTL Mitigation can be controlled per task by
++ PR_SET_SPECULATION_CTRL
++1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
++ disabled
++2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
++ enabled
++==== ================ ===================================================
++
++If all bits are 0 the CPU is not affected by the speculation misfeature.
++
++If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
++available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
++misfeature will fail.
++
++PR_SET_SPECULATION_CTRL
++-----------------------
++PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
++is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
++in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
++
++Common error codes
++------------------
++======= =================================================================
++Value Meaning
++======= =================================================================
++EINVAL The prctl is not implemented by the architecture or unused
++ prctl(2) arguments are not 0
++
++ENODEV arg2 is selecting a not supported speculation misfeature
++======= =================================================================
++
++PR_SET_SPECULATION_CTRL error codes
++-----------------------------------
++======= =================================================================
++Value Meaning
++======= =================================================================
++0 Success
++
++ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
++ PR_SPEC_DISABLE
++
++ENXIO Control of the selected speculation misfeature is not possible.
++ See PR_GET_SPECULATION_CTRL.
++======= =================================================================
++
++Speculation misfeature controls
++-------------------------------
++- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
++
++ Invocations:
++ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -55,4 +55,9 @@ static inline unsigned long array_index_
+ \
+ (typeof(_i)) (_i & _mask); \
+ })
++
++/* Speculation control prctl */
++int arch_prctl_spec_ctrl_get(unsigned long which);
++int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
++
+ #endif /* _LINUX_NOSPEC_H */
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -197,4 +197,15 @@ struct prctl_mm_map {
+ # define PR_CAP_AMBIENT_LOWER 3
+ # define PR_CAP_AMBIENT_CLEAR_ALL 4
+
++/* Per task speculation control */
++#define PR_GET_SPECULATION_CTRL 52
++#define PR_SET_SPECULATION_CTRL 53
++/* Speculation control variants */
++# define PR_SPEC_STORE_BYPASS 0
++/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
++# define PR_SPEC_NOT_AFFECTED 0
++# define PR_SPEC_PRCTL (1UL << 0)
++# define PR_SPEC_ENABLE (1UL << 1)
++# define PR_SPEC_DISABLE (1UL << 2)
++
+ #endif /* _LINUX_PRCTL_H */
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -53,6 +53,8 @@
+ #include <linux/uidgid.h>
+ #include <linux/cred.h>
+
++#include <linux/nospec.h>
++
+ #include <linux/kmsg_dump.h>
+ /* Move somewhere else to avoid recompiling? */
+ #include <generated/utsrelease.h>
+@@ -2072,6 +2074,16 @@ static int prctl_get_tid_address(struct
+ }
+ #endif
+
++int __weak arch_prctl_spec_ctrl_get(unsigned long which)
++{
++ return -EINVAL;
++}
++
++int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++{
++ return -EINVAL;
++}
++
+ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ unsigned long, arg4, unsigned long, arg5)
+ {
+@@ -2270,6 +2282,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
+ case PR_GET_FP_MODE:
+ error = GET_FP_MODE(me);
+ break;
++ case PR_GET_SPECULATION_CTRL:
++ if (arg3 || arg4 || arg5)
++ return -EINVAL;
++ error = arch_prctl_spec_ctrl_get(arg2);
++ break;
++ case PR_SET_SPECULATION_CTRL:
++ if (arg4 || arg5)
++ return -EINVAL;
++ error = arch_prctl_spec_ctrl_set(arg2, arg3);
++ break;
+ default:
+ error = -EINVAL;
+ break;
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 1 May 2018 15:31:45 -0700
+Subject: proc: Provide details on speculation flaw mitigations
+
+From: Kees Cook <keescook@chromium.org>
+
+commit fae1fa0fc6cca8beee3ab8ed71d54f9a78fa3f64 upstream
+
+As done with seccomp and no_new_privs, also show speculation flaw
+mitigation state in /proc/$pid/status.
+
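+The new line in /proc/$pid/status then looks, for example, like:
+
+        Speculation Store Bypass:       thread vulnerable
+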
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/array.c | 24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -80,6 +80,7 @@
+ #include <linux/delayacct.h>
+ #include <linux/seq_file.h>
+ #include <linux/pid_namespace.h>
++#include <linux/prctl.h>
+ #include <linux/ptrace.h>
+ #include <linux/tracehook.h>
+ #include <linux/string_helpers.h>
+@@ -345,8 +346,29 @@ static inline void task_seccomp(struct s
+ {
+ #ifdef CONFIG_SECCOMP
+ seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
+- seq_putc(m, '\n');
+ #endif
++ seq_printf(m, "\nSpeculation Store Bypass:\t");
++ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
++ case -EINVAL:
++ seq_printf(m, "unknown");
++ break;
++ case PR_SPEC_NOT_AFFECTED:
++ seq_printf(m, "not vulnerable");
++ break;
++ case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
++ seq_printf(m, "thread mitigated");
++ break;
++ case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
++ seq_printf(m, "thread vulnerable");
++ break;
++ case PR_SPEC_DISABLE:
++ seq_printf(m, "globally mitigated");
++ break;
++ default:
++ seq_printf(m, "vulnerable");
++ break;
++ }
++ seq_putc(m, '\n');
+ }
+
+ static inline void task_context_switch_counts(struct seq_file *m,
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 9 May 2018 21:41:38 +0200
+Subject: proc: Use underscores for SSBD in 'status'
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit e96f46ee8587607a828f783daa6eb5b44d25004d upstream
+
+The style for field names in the 'status' file is CamelCase or words joined
+with underscores, not words separated by spaces.
+
+Fixes: fae1fa0fc6cc ("proc: Provide details on speculation flaw mitigations")
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/array.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -347,7 +347,7 @@ static inline void task_seccomp(struct s
+ #ifdef CONFIG_SECCOMP
+ seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
+ #endif
+- seq_printf(m, "\nSpeculation Store Bypass:\t");
++ seq_printf(m, "\nSpeculation_Store_Bypass:\t");
+ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+ case -EINVAL:
+ seq_printf(m, "unknown");
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 3 May 2018 14:56:12 -0700
+Subject: seccomp: Add filter flag to opt-out of SSB mitigation
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 00a02d0c502a06d15e07b857f8ff921e3e402675 upstream
+
+If a seccomp user is not interested in Speculative Store Bypass mitigation
+by default, it can set the new SECCOMP_FILTER_FLAG_SPEC_ALLOW flag when
+adding filters.
+
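+A sketch of such an opt-out (prog is a struct sock_fprog describing the
+filter, elided here):
+
+        ret = seccomp(SECCOMP_SET_MODE_FILTER,
+                      SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog);
+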
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/seccomp.h | 3 -
+ include/uapi/linux/seccomp.h | 4 +
+ kernel/seccomp.c | 19 +++---
+ tools/testing/selftests/seccomp/seccomp_bpf.c | 78 +++++++++++++++++++++++++-
+ 4 files changed, 93 insertions(+), 11 deletions(-)
+
+--- a/include/linux/seccomp.h
++++ b/include/linux/seccomp.h
+@@ -3,7 +3,8 @@
+
+ #include <uapi/linux/seccomp.h>
+
+-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC)
++#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
++ SECCOMP_FILTER_FLAG_SPEC_ALLOW)
+
+ #ifdef CONFIG_SECCOMP
+
+--- a/include/uapi/linux/seccomp.h
++++ b/include/uapi/linux/seccomp.h
+@@ -15,7 +15,9 @@
+ #define SECCOMP_SET_MODE_FILTER 1
+
+ /* Valid flags for SECCOMP_SET_MODE_FILTER */
+-#define SECCOMP_FILTER_FLAG_TSYNC 1
++#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
++/* In v4.14+ SECCOMP_FILTER_FLAG_LOG is (1UL << 1) */
++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+
+ /*
+ * All BPF programs must return a 32-bit value.
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -230,7 +230,8 @@ static inline void spec_mitigate(struct
+ }
+
+ static inline void seccomp_assign_mode(struct task_struct *task,
+- unsigned long seccomp_mode)
++ unsigned long seccomp_mode,
++ unsigned long flags)
+ {
+ assert_spin_locked(&task->sighand->siglock);
+
+@@ -240,8 +241,9 @@ static inline void seccomp_assign_mode(s
+ * filter) is set.
+ */
+ smp_mb__before_atomic();
+- /* Assume seccomp processes want speculation flaw mitigation. */
+- spec_mitigate(task, PR_SPEC_STORE_BYPASS);
++ /* Assume default seccomp processes want spec flaw mitigation. */
++ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
++ spec_mitigate(task, PR_SPEC_STORE_BYPASS);
+ set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+
+@@ -309,7 +311,7 @@ static inline pid_t seccomp_can_sync_thr
+ * without dropping the locks.
+ *
+ */
+-static inline void seccomp_sync_threads(void)
++static inline void seccomp_sync_threads(unsigned long flags)
+ {
+ struct task_struct *thread, *caller;
+
+@@ -350,7 +352,8 @@ static inline void seccomp_sync_threads(
+ * allow one thread to transition the other.
+ */
+ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+- seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
++ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
++ flags);
+ }
+ }
+
+@@ -469,7 +472,7 @@ static long seccomp_attach_filter(unsign
+
+ /* Now that the new filter is in place, synchronize to all threads. */
+ if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+- seccomp_sync_threads();
++ seccomp_sync_threads(flags);
+
+ return 0;
+ }
+@@ -729,7 +732,7 @@ static long seccomp_set_mode_strict(void
+ #ifdef TIF_NOTSC
+ disable_TSC();
+ #endif
+- seccomp_assign_mode(current, seccomp_mode);
++ seccomp_assign_mode(current, seccomp_mode, 0);
+ ret = 0;
+
+ out:
+@@ -787,7 +790,7 @@ static long seccomp_set_mode_filter(unsi
+ /* Do not free the successfully attached filter. */
+ prepared = NULL;
+
+- seccomp_assign_mode(current, seccomp_mode);
++ seccomp_assign_mode(current, seccomp_mode, flags);
+ out:
+ spin_unlock_irq(¤t->sighand->siglock);
+ if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -1692,7 +1692,11 @@ TEST_F_SIGNAL(TRACE_syscall, kill_after_
+ #endif
+
+ #ifndef SECCOMP_FILTER_FLAG_TSYNC
+-#define SECCOMP_FILTER_FLAG_TSYNC 1
++#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
++#endif
++
++#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+ #endif
+
+ #ifndef seccomp
+@@ -1791,6 +1795,78 @@ TEST(seccomp_syscall_mode_lock)
+ }
+ }
+
++/*
++ * Test detection of known and unknown filter flags. Userspace needs to be able
++ * to check if a filter flag is supported by the current kernel and a good way
++ * of doing that is by attempting to enter filter mode, with the flag bit in
++ * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
++ * that the flag is valid and EINVAL indicates that the flag is invalid.
++ */
++TEST(detect_seccomp_filter_flags)
++{
++ unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
++ SECCOMP_FILTER_FLAG_SPEC_ALLOW };
++ unsigned int flag, all_flags;
++ int i;
++ long ret;
++
++ /* Test detection of known-good filter flags */
++ for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
++ int bits = 0;
++
++ flag = flags[i];
++ /* Make sure the flag is a single bit! */
++ while (flag) {
++ if (flag & 0x1)
++ bits ++;
++ flag >>= 1;
++ }
++ ASSERT_EQ(1, bits);
++ flag = flags[i];
++
++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
++ ASSERT_NE(ENOSYS, errno) {
++ TH_LOG("Kernel does not support seccomp syscall!");
++ }
++ EXPECT_EQ(-1, ret);
++ EXPECT_EQ(EFAULT, errno) {
++ TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
++ flag);
++ }
++
++ all_flags |= flag;
++ }
++
++ /* Test detection of all known-good filter flags */
++ ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
++ EXPECT_EQ(-1, ret);
++ EXPECT_EQ(EFAULT, errno) {
++ TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
++ all_flags);
++ }
++
++ /* Test detection of an unknown filter flag */
++ flag = -1;
++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
++ EXPECT_EQ(-1, ret);
++ EXPECT_EQ(EINVAL, errno) {
++ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
++ flag);
++ }
++
++ /*
++ * Test detection of an unknown filter flag that may simply need to be
++ * added to this test
++ */
++ flag = flags[ARRAY_SIZE(flags) - 1] << 1;
++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
++ EXPECT_EQ(-1, ret);
++ EXPECT_EQ(EINVAL, errno) {
++ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
++ flag);
++ }
++}
++
+ TEST(TSYNC_first)
+ {
+ struct sock_filter filter[] = {
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 1 May 2018 15:07:31 -0700
+Subject: seccomp: Enable speculation flaw mitigations
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 5c3070890d06ff82eecb808d02d2ca39169533ef upstream
+
+When speculation flaw mitigations are opt-in (via prctl), using seccomp
+will automatically opt in to these protections, since using seccomp
+indicates at least some level of sandboxing is desired.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/seccomp.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -16,6 +16,8 @@
+ #include <linux/atomic.h>
+ #include <linux/audit.h>
+ #include <linux/compat.h>
++#include <linux/nospec.h>
++#include <linux/prctl.h>
+ #include <linux/sched.h>
+ #include <linux/seccomp.h>
+ #include <linux/slab.h>
+@@ -214,6 +216,19 @@ static inline bool seccomp_may_assign_mo
+ return true;
+ }
+
++/*
++ * If a given speculation mitigation is opt-in (prctl()-controlled),
++ * select it, by disabling speculation (enabling mitigation).
++ */
++static inline void spec_mitigate(struct task_struct *task,
++ unsigned long which)
++{
++ int state = arch_prctl_spec_ctrl_get(task, which);
++
++ if (state > 0 && (state & PR_SPEC_PRCTL))
++ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE);
++}
++
+ static inline void seccomp_assign_mode(struct task_struct *task,
+ unsigned long seccomp_mode)
+ {
+@@ -225,6 +240,8 @@ static inline void seccomp_assign_mode(s
+ * filter) is set.
+ */
+ smp_mb__before_atomic();
++ /* Assume seccomp processes want speculation flaw mitigation. */
++ spec_mitigate(task, PR_SPEC_STORE_BYPASS);
+ set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 May 2018 15:12:06 +0200
+Subject: seccomp: Move speculation migitation control to arch code
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 8bf37d8c067bb7eb8e7c381bdadf9bd89182b6bc upstream
+
+The mitigation control is simpler to implement in architecture code as it
+avoids the extra function call to check the mode. Aside from that, having an
+explicit seccomp-enabled mode in the architecture mitigations would require
+even more workarounds.
+
+Move it into architecture code and provide a weak function in the seccomp
+code. Remove the 'which' argument as this allows the architecture to decide
+which mitigations are relevant for seccomp.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 29 ++++++++++++++++++-----------
+ include/linux/nospec.h | 2 ++
+ kernel/seccomp.c | 15 ++-------------
+ 3 files changed, 22 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -568,6 +568,24 @@ static int ssb_prctl_set(struct task_str
+ return 0;
+ }
+
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssb_prctl_set(task, ctrl);
++ default:
++ return -ENODEV;
++ }
++}
++
++#ifdef CONFIG_SECCOMP
++void arch_seccomp_spec_mitigate(struct task_struct *task)
++{
++ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
++}
++#endif
++
+ static int ssb_prctl_get(struct task_struct *task)
+ {
+ switch (ssb_mode) {
+@@ -586,17 +604,6 @@ static int ssb_prctl_get(struct task_str
+ }
+ }
+
+-int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+- unsigned long ctrl)
+-{
+- switch (which) {
+- case PR_SPEC_STORE_BYPASS:
+- return ssb_prctl_set(task, ctrl);
+- default:
+- return -ENODEV;
+- }
+-}
+-
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+ {
+ switch (which) {
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -62,5 +62,7 @@ static inline unsigned long array_index_
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl);
++/* Speculation control for seccomp enforced mitigation */
++void arch_seccomp_spec_mitigate(struct task_struct *task);
+
+ #endif /* _LINUX_NOSPEC_H */
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -216,18 +216,7 @@ static inline bool seccomp_may_assign_mo
+ return true;
+ }
+
+-/*
+- * If a given speculation mitigation is opt-in (prctl()-controlled),
+- * select it, by disabling speculation (enabling mitigation).
+- */
+-static inline void spec_mitigate(struct task_struct *task,
+- unsigned long which)
+-{
+- int state = arch_prctl_spec_ctrl_get(task, which);
+-
+- if (state > 0 && (state & PR_SPEC_PRCTL))
+- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
+-}
++void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
+
+ static inline void seccomp_assign_mode(struct task_struct *task,
+ unsigned long seccomp_mode,
+@@ -243,7 +232,7 @@ static inline void seccomp_assign_mode(s
+ smp_mb__before_atomic();
+ /* Assume default seccomp processes want spec flaw mitigation. */
+ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
+- spec_mitigate(task, PR_SPEC_STORE_BYPASS);
++ arch_seccomp_spec_mitigate(task);
+ set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 May 2018 09:40:03 +0200
+Subject: seccomp: Use PR_SPEC_FORCE_DISABLE
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit b849a812f7eb92e96d1c8239b06581b2cfd8b275 upstream
+
+Use PR_SPEC_FORCE_DISABLE in seccomp() because seccomp does not allow
+widening restrictions.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/seccomp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -226,7 +226,7 @@ static inline void spec_mitigate(struct
+ int state = arch_prctl_spec_ctrl_get(task, which);
+
+ if (state > 0 && (state & PR_SPEC_PRCTL))
+- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE);
++ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
+ }
+
+ static inline void seccomp_assign_mode(struct task_struct *task,
btrfs-fix-crash-when-trying-to-resume-balance-without-the-resume-flag.patch
x86-amd-don-t-set-x86_bug_sysret_ss_attrs-when-running.patch
btrfs-fix-reading-stale-metadata-blocks-after-degraded-raid1-mounts.patch
+x86-nospec-simplify-alternative_msr_write.patch
+x86-bugs-concentrate-bug-detection-into-a-separate-function.patch
+x86-bugs-concentrate-bug-reporting-into-a-separate-function.patch
+x86-bugs-read-spec_ctrl-msr-during-boot-and-re-use-reserved-bits.patch
+x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch
+x86-bugs-expose-sys-..-spec_store_bypass.patch
+x86-cpufeatures-add-x86_feature_rds.patch
+x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch
+x86-bugs-intel-set-proper-cpu-features-and-setup-rds.patch
+x86-bugs-whitelist-allowed-spec_ctrl-msr-values.patch
+x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch
+x86-kvm-vmx-expose-spec_ctrl-bit-2-to-the-guest.patch
+x86-speculation-create-spec-ctrl.h-to-avoid-include-hell.patch
+prctl-add-speculation-control-prctls.patch
+x86-process-optimize-tif-checks-in-__switch_to_xtra.patch
+x86-process-correct-and-optimize-tif_blockstep-switch.patch
+x86-process-optimize-tif_notsc-switch.patch
+x86-process-allow-runtime-control-of-speculative-store-bypass.patch
+x86-speculation-add-prctl-for-speculative-store-bypass-mitigation.patch
+nospec-allow-getting-setting-on-non-current-task.patch
+proc-provide-details-on-speculation-flaw-mitigations.patch
+seccomp-enable-speculation-flaw-mitigations.patch
+x86-bugs-make-boot-modes-__ro_after_init.patch
+prctl-add-force-disable-speculation.patch
+seccomp-use-pr_spec_force_disable.patch
+seccomp-add-filter-flag-to-opt-out-of-ssb-mitigation.patch
+seccomp-move-speculation-migitation-control-to-arch-code.patch
+x86-speculation-make-seccomp-the-default-mode-for-speculative-store-bypass.patch
+x86-bugs-rename-_rds-to-_ssbd.patch
+proc-use-underscores-for-ssbd-in-status.patch
+documentation-spec_ctrl-do-some-minor-cleanups.patch
+x86-bugs-fix-__ssb_select_mitigation-return-type.patch
+x86-bugs-make-cpu_show_common-static.patch
+x86-bugs-fix-the-parameters-alignment-and-missing-void.patch
+x86-cpu-make-alternative_msr_write-work-for-32-bit-code.patch
+kvm-svm-move-spec-control-call-after-restore-of-gs.patch
+x86-speculation-use-synthetic-bits-for-ibrs-ibpb-stibp.patch
+x86-cpufeatures-disentangle-msr_spec_ctrl-enumeration-from-ibrs.patch
+x86-cpufeatures-disentangle-ssbd-enumeration.patch
+x86-cpu-amd-fix-erratum-1076-cpb-bit.patch
+x86-cpufeatures-add-feature_zen.patch
+x86-speculation-handle-ht-correctly-on-amd.patch
+x86-bugs-kvm-extend-speculation-control-for-virt_spec_ctrl.patch
+x86-speculation-add-virtualized-speculative-store-bypass-disable-support.patch
+x86-speculation-rework-speculative_store_bypass_update.patch
+x86-bugs-unify-x86_spec_ctrl_-set_guest-restore_host.patch
+x86-bugs-expose-x86_spec_ctrl_base-directly.patch
+x86-bugs-remove-x86_spec_ctrl_set.patch
+x86-bugs-rework-spec_ctrl-base-and-mask-logic.patch
+x86-speculation-kvm-implement-support-for-virt_spec_ctrl-ls_cfg.patch
+kvm-svm-implement-virt_spec_ctrl-support-for-ssbd.patch
+x86-bugs-rename-ssbd_no-to-ssb_no.patch
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sun, 20 May 2018 20:52:05 +0100
+Subject: x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 764f3c21588a059cd783c6ba0734d4db2d72822d upstream
+
+AMD does not need the Speculative Store Bypass mitigation to be enabled.
+
+The controls for this are already available; the mitigation is applied via
+MSR C001_1020. Each family uses a different bit in that MSR for this.
+
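+In sketch form, with the per-family bit cached as a mask at boot:
+
+        /* Fam 15h: bit 54, Fam 16h: bit 33, Fam 17h: bit 10 */
+        wrmsrl(MSR_AMD64_LS_CFG, x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask);
+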
+[ tglx: Expose the bit mask via a variable and move the actual MSR fiddling
+ into the bugs code as that's the right thing to do and also required
+ to prepare for dynamic enable/disable ]
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/nospec-branch.h | 4 ++++
+ arch/x86/kernel/cpu/amd.c | 26 ++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++++++++++++-
+ arch/x86/kernel/cpu/common.c | 4 ++++
+ 5 files changed, 61 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -205,6 +205,7 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
++#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -244,6 +244,10 @@ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_DISABLE,
+ };
+
++/* AMD specific Speculative Store Bypass MSR data */
++extern u64 x86_amd_ls_cfg_base;
++extern u64 x86_amd_ls_cfg_rds_mask;
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -9,6 +9,7 @@
+ #include <asm/processor.h>
+ #include <asm/apic.h>
+ #include <asm/cpu.h>
++#include <asm/nospec-branch.h>
+ #include <asm/smp.h>
+ #include <asm/pci-direct.h>
+ #include <asm/delay.h>
+@@ -542,6 +543,26 @@ static void bsp_init_amd(struct cpuinfo_
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+ nodes_per_socket = ((value >> 3) & 7) + 1;
+ }
++
++ if (c->x86 >= 0x15 && c->x86 <= 0x17) {
++ unsigned int bit;
++
++ switch (c->x86) {
++ case 0x15: bit = 54; break;
++ case 0x16: bit = 33; break;
++ case 0x17: bit = 10; break;
++ default: return;
++ }
++ /*
++ * Try to cache the base value so further operations can
++ * avoid RMW. If that faults, do not enable RDS.
++ */
++ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
++ setup_force_cpu_cap(X86_FEATURE_RDS);
++ setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
++ x86_amd_ls_cfg_rds_mask = 1ULL << bit;
++ }
++ }
+ }
+
+ static void early_init_amd(struct cpuinfo_x86 *c)
+@@ -827,6 +848,11 @@ static void init_amd(struct cpuinfo_x86
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++
++ if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
++ set_cpu_cap(c, X86_FEATURE_RDS);
++ set_cpu_cap(c, X86_FEATURE_AMD_RDS);
++ }
+ }
+
+ #ifdef CONFIG_X86_32
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -40,6 +40,13 @@ static u64 __ro_after_init x86_spec_ctrl
+ */
+ static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+
++/*
++ * AMD specific MSR info for Speculative Store Bypass control.
++ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
++ */
++u64 __ro_after_init x86_amd_ls_cfg_base;
++u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -51,7 +58,8 @@ void __init check_bugs(void)
+
+ /*
+ * Read the SPEC_CTRL MSR to account for reserved bits which may
+- * have unknown values.
++ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
++ * init code as it is not enumerated and depends on the family.
+ */
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+@@ -153,6 +161,14 @@ void x86_spec_ctrl_restore_host(u64 gues
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
++static void x86_amd_rds_enable(void)
++{
++ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
++
++ if (boot_cpu_has(X86_FEATURE_AMD_RDS))
++ wrmsrl(MSR_AMD64_LS_CFG, msrval);
++}
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+@@ -442,6 +458,11 @@ static enum ssb_mitigation_cmd __init __
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
++ /*
++ * AMD platforms by default don't need SSB mitigation.
++ */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
++ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+ break;
+@@ -468,6 +489,7 @@ static enum ssb_mitigation_cmd __init __
+ x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ break;
+ case X86_VENDOR_AMD:
++ x86_amd_rds_enable();
+ break;
+ }
+ }
+@@ -489,6 +511,9 @@ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
++
++ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
++ x86_amd_rds_enable();
+ }
+
+ #ifdef CONFIG_SYSFS
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -895,6 +895,10 @@ static const __initconst struct x86_cpu_
+ { X86_VENDOR_CENTAUR, 5, },
+ { X86_VENDOR_INTEL, 5, },
+ { X86_VENDOR_NSC, 5, },
++ { X86_VENDOR_AMD, 0x12, },
++ { X86_VENDOR_AMD, 0x11, },
++ { X86_VENDOR_AMD, 0x10, },
++ { X86_VENDOR_AMD, 0xf, },
+ { X86_VENDOR_ANY, 4, },
+ {}
+ };
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:16 -0400
+Subject: x86/bugs: Concentrate bug detection into a separate function
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 4a28bfe3267b68e22c663ac26185aa16c9b879ef upstream
+
+Combine the various pieces of logic which go through all those
+x86_cpu_id matching structures into one function.
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -879,21 +879,27 @@ static const __initconst struct x86_cpu_
+ {}
+ };
+
+-static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
++static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
++ if (x86_match_cpu(cpu_no_speculation))
++ return;
++
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++
+ if (x86_match_cpu(cpu_no_meltdown))
+- return false;
++ return;
+
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+- return false;
++ return;
+
+- return true;
++ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+ }
+
+ /*
+@@ -942,12 +948,7 @@ static void __init early_identify_cpu(st
+
+ setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+
+- if (!x86_match_cpu(cpu_no_speculation)) {
+- if (cpu_vulnerable_to_meltdown(c))
+- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+- }
++ cpu_set_bug_bits(c);
+
+ fpu__init_system(c);
+
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:17 -0400
+Subject: x86/bugs: Concentrate bug reporting into a separate function
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit d1059518b4789cabe34bb4b714d07e6089c82ca1 upstream
+
+Those sysfs functions have a similar preamble; as such, add common
+code to handle them.
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 46 +++++++++++++++++++++++++++++++--------------
+ 1 file changed, 32 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -313,30 +313,48 @@ retpoline_auto:
+ #undef pr_fmt
+
+ #ifdef CONFIG_SYSFS
+-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++
++ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
++ char *buf, unsigned int bug)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
++ if (!boot_cpu_has_bug(bug))
+ return sprintf(buf, "Not affected\n");
+- if (boot_cpu_has(X86_FEATURE_KAISER))
+- return sprintf(buf, "Mitigation: PTI\n");
++
++ switch (bug) {
++ case X86_BUG_CPU_MELTDOWN:
++ if (boot_cpu_has(X86_FEATURE_KAISER))
++ return sprintf(buf, "Mitigation: PTI\n");
++
++ break;
++
++ case X86_BUG_SPECTRE_V1:
++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++
++ case X86_BUG_SPECTRE_V2:
++ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
++ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++ spectre_v2_module_string());
++
++ default:
++ break;
++ }
++
+ return sprintf(buf, "Vulnerable\n");
+ }
+
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
++}
++
+ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+- return sprintf(buf, "Not affected\n");
+- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
+ }
+
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+- return sprintf(buf, "Not affected\n");
+-
+- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+- boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+- spectre_v2_module_string());
++ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+ }
+ #endif
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:20 -0400
+Subject: x86/bugs: Expose /sys/../spec_store_bypass
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit c456442cd3a59eeb1d60293c26cbe2ff2c4e42cf upstream
+
+Add the sysfs file for the new vulnerability. It does not do much except
+show the word 'Vulnerable' for recent x86 cores.
+
+Intel cores prior to family 6 are known not to be vulnerable, and so are
+some Atoms and some Xeon Phi.
+
+It assumes that older Cyrix, Centaur, etc. cores are immune.
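+
+Purely as an illustration (not part of this patch), a minimal userspace
+reader of the new attribute could look like the sketch below; only the
+sysfs path comes from the ABI entry added here, everything else is
+hypothetical example code.
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		char buf[128];
+		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");
+
+		if (!f) {
+			perror("fopen");
+			return 1;
+		}
+		/* Prints e.g. "Vulnerable" or "Not affected" */
+		if (fgets(buf, sizeof(buf), f))
+			fputs(buf, stdout);
+		fclose(f);
+		return 0;
+	}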
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 1
+ arch/x86/include/asm/cpufeatures.h | 1
+ arch/x86/kernel/cpu/bugs.c | 5 ++++
+ arch/x86/kernel/cpu/common.c | 23 +++++++++++++++++++++
+ drivers/base/cpu.c | 8 +++++++
+ include/linux/cpu.h | 2 +
+ 6 files changed, 40 insertions(+)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -355,6 +355,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/meltdown
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
++ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -335,5 +335,6 @@
+ #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
++#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -403,4 +403,9 @@ ssize_t cpu_show_spectre_v2(struct devic
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+ }
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
++}
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -879,10 +879,33 @@ static const __initconst struct x86_cpu_
+ {}
+ };
+
++static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
++ { X86_VENDOR_CENTAUR, 5, },
++ { X86_VENDOR_INTEL, 5, },
++ { X86_VENDOR_NSC, 5, },
++ { X86_VENDOR_ANY, 4, },
++ {}
++};
++
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
++ if (!x86_match_cpu(cpu_no_spec_store_bypass))
++ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
++
+ if (x86_match_cpu(cpu_no_speculation))
+ return;
+
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -519,14 +519,22 @@ ssize_t __weak cpu_show_spectre_v2(struc
+ return sprintf(buf, "Not affected\n");
+ }
+
++ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
++static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
++ &dev_attr_spec_store_bypass.attr,
+ NULL
+ };
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -50,6 +50,8 @@ extern ssize_t cpu_show_spectre_v1(struc
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 12 May 2018 20:49:16 +0200
+Subject: x86/bugs: Expose x86_spec_ctrl_base directly
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit fa8ac4988249c38476f6ad678a4848a736373403 upstream
+
+x86_spec_ctrl_base is the system-wide default value for the SPEC_CTRL MSR.
+x86_spec_ctrl_get_default() returns x86_spec_ctrl_base and was intended to
+prevent modification of that variable. However, the variable is read-only
+after init and globally visible already.
+
+Remove the function and export the variable instead.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 16 +++++-----------
+ arch/x86/include/asm/spec-ctrl.h | 3 ---
+ arch/x86/kernel/cpu/bugs.c | 11 +----------
+ 3 files changed, 6 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -217,16 +217,7 @@ enum spectre_v2_mitigation {
+ SPECTRE_V2_IBRS,
+ };
+
+-/*
+- * The Intel specification for the SPEC_CTRL MSR requires that we
+- * preserve any already set reserved bits at boot time (e.g. for
+- * future additions that this kernel is not currently aware of).
+- * We then set any additional mitigation bits that we want
+- * ourselves and always use this as the base for SPEC_CTRL.
+- * We also use this when handling guest entry/exit as below.
+- */
+ extern void x86_spec_ctrl_set(u64);
+-extern u64 x86_spec_ctrl_get_default(void);
+
+ /* The Speculative Store Bypass disable variants */
+ enum ssb_mitigation {
+@@ -278,6 +269,9 @@ static inline void indirect_branch_predi
+ alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+ }
+
++/* The Intel SPEC CTRL MSR base value cache */
++extern u64 x86_spec_ctrl_base;
++
+ /*
+ * With retpoline, we must use IBRS to restrict branch prediction
+ * before calling into firmware.
+@@ -286,7 +280,7 @@ static inline void indirect_branch_predi
+ */
+ #define firmware_restrict_branch_speculation_start() \
+ do { \
+- u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS; \
++ u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
+ \
+ preempt_disable(); \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+@@ -295,7 +289,7 @@ do { \
+
+ #define firmware_restrict_branch_speculation_end() \
+ do { \
+- u64 val = x86_spec_ctrl_get_default(); \
++ u64 val = x86_spec_ctrl_base; \
+ \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -47,9 +47,6 @@ void x86_spec_ctrl_restore_host(u64 gues
+ extern u64 x86_amd_ls_cfg_base;
+ extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+-/* The Intel SPEC CTRL MSR base value cache */
+-extern u64 x86_spec_ctrl_base;
+-
+ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+ {
+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -35,6 +35,7 @@ static void __init ssb_select_mitigation
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+ u64 __ro_after_init x86_spec_ctrl_base;
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+ /*
+ * The vendor and possibly platform specific bits which can be modified in
+@@ -140,16 +141,6 @@ void x86_spec_ctrl_set(u64 val)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
+
+-u64 x86_spec_ctrl_get_default(void)
+-{
+- u64 msrval = x86_spec_ctrl_base;
+-
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+- msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+- return msrval;
+-}
+-EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+-
+ void
+ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ {
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Thu, 10 May 2018 22:47:18 +0200
+Subject: x86/bugs: Fix __ssb_select_mitigation() return type
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit d66d8ff3d21667b41eddbe86b35ab411e40d8c5f upstream
+
+__ssb_select_mitigation() returns one of the members of enum ssb_mitigation,
+not ssb_mitigation_cmd; fix the prototype to reflect that.
+
+Fixes: 24f7fc83b9204 ("x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation")
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -467,7 +467,7 @@ static enum ssb_mitigation_cmd __init ss
+ return cmd;
+ }
+
+-static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
++static enum ssb_mitigation __init __ssb_select_mitigation(void)
+ {
+ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+ enum ssb_mitigation_cmd cmd;
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 11 May 2018 16:50:35 -0400
+Subject: x86/bugs: Fix the parameters alignment and missing void
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit ffed645e3be0e32f8e9ab068d257aee8d0fe8eec upstream
+
+Fixes: 7bb4d366c ("x86/bugs: Make cpu_show_common() static")
+Fixes: 24f7fc83b ("x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation")
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -530,7 +530,7 @@ static enum ssb_mitigation __init __ssb_
+ return mode;
+ }
+
+-static void ssb_select_mitigation()
++static void ssb_select_mitigation(void)
+ {
+ ssb_mode = __ssb_select_mitigation();
+
+@@ -640,7 +640,7 @@ void x86_spec_ctrl_setup_ap(void)
+ #ifdef CONFIG_SYSFS
+
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+- char *buf, unsigned int bug)
++ char *buf, unsigned int bug)
+ {
+ if (!boot_cpu_has_bug(bug))
+ return sprintf(buf, "Not affected\n");
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:22 -0400
+Subject: x86/bugs/intel: Set proper CPU features and setup RDS
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 772439717dbf703b39990be58d8d4e3e4ad0598a upstream
+
+Intel CPUs expose methods to:
+
+ - Detect whether RDS capability is available via CPUID.7.0.EDX[31],
+
+ - The SPEC_CTRL MSR(0x48), bit 2 set to enable RDS.
+
+ - MSR_IA32_ARCH_CAPABILITIES, Bit(4): no need to enable RDS.
+
+With that in mind, if spec_store_bypass_disable=[auto,on] is selected, set
+the SPEC_CTRL MSR at boot time to enable RDS if the platform requires it.
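+
+As a hypothetical userspace sketch (not part of this patch), the
+CPUID.7.0.EDX[31] enumeration mentioned above could be checked roughly
+like this:
+
+	#include <cpuid.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int eax, ebx, ecx, edx;
+
+		/* CPUID leaf 7, subleaf 0: EDX bit 31 enumerates RDS */
+		if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
+			return 1;
+
+		printf("RDS %ssupported\n", (edx & (1u << 31)) ? "" : "not ");
+		return 0;
+	}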
+
+Note that this does not fix the KVM case where the SPEC_CTRL is exposed to
+guests which can muck with it; see the patch titled:
+ KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS.
+
+And for the firmware (IBRS to be set), see patch titled:
+ x86/spectre_v2: Read SPEC_CTRL MSR during boot and re-use reserved bits
+
+[ tglx: Disentangled it from the Intel implementation and kept the call order ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 6 ++++++
+ arch/x86/kernel/cpu/bugs.c | 30 ++++++++++++++++++++++++++++--
+ arch/x86/kernel/cpu/common.c | 10 ++++++----
+ arch/x86/kernel/cpu/cpu.h | 3 +++
+ arch/x86/kernel/cpu/intel.c | 1 +
+ 5 files changed, 44 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -40,6 +40,7 @@
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
++#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+@@ -61,6 +62,11 @@
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+ #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
++#define ARCH_CAP_RDS_NO (1 << 4) /*
++ * Not susceptible to Speculative Store Bypass
++ * attack, so no Reduced Data Speculation control
++ * required.
++ */
+
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+ #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -116,7 +116,7 @@ static enum spectre_v2_mitigation spectr
+
+ void x86_spec_ctrl_set(u64 val)
+ {
+- if (val & ~SPEC_CTRL_IBRS)
++ if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
+ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+ else
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+@@ -443,8 +443,28 @@ static enum ssb_mitigation_cmd __init __
+ break;
+ }
+
+- if (mode != SPEC_STORE_BYPASS_NONE)
++ /*
++ * We have three CPU feature flags that are in play here:
++ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
++ * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
++ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
++ */
++ if (mode != SPEC_STORE_BYPASS_NONE) {
+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
++ /*
++ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
++ * a completely different MSR and bit dependent on family.
++ */
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
++ x86_spec_ctrl_set(SPEC_CTRL_RDS);
++ break;
++ case X86_VENDOR_AMD:
++ break;
++ }
++ }
++
+ return mode;
+ }
+
+@@ -458,6 +478,12 @@ static void ssb_select_mitigation()
+
+ #undef pr_fmt
+
++void x86_spec_ctrl_setup_ap(void)
++{
++ if (boot_cpu_has(X86_FEATURE_IBRS))
++ x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
++}
++
+ #ifdef CONFIG_SYSFS
+
+ ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -903,7 +903,11 @@ static void __init cpu_set_bug_bits(stru
+ {
+ u64 ia32_cap = 0;
+
+- if (!x86_match_cpu(cpu_no_spec_store_bypass))
++ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++
++ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
++ !(ia32_cap & ARCH_CAP_RDS_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
+@@ -915,9 +919,6 @@ static void __init cpu_set_bug_bits(stru
+ if (x86_match_cpu(cpu_no_meltdown))
+ return;
+
+- if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+-
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+ return;
+@@ -1339,6 +1340,7 @@ void identify_secondary_cpu(struct cpuin
+ #endif
+ mtrr_ap_init();
+ validate_apic_and_package_id(c);
++ x86_spec_ctrl_setup_ap();
+ }
+
+ struct msr_range {
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -46,4 +46,7 @@ extern const struct cpu_dev *const __x86
+
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
++
++extern void x86_spec_ctrl_setup_ap(void);
++
+ #endif /* ARCH_X86_CPU_H */
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -154,6 +154,7 @@ static void early_init_intel(struct cpui
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
++ setup_clear_cpu_cap(X86_FEATURE_RDS);
+ }
+
+ /*
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 9 May 2018 23:01:01 +0200
+Subject: x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit ccbcd2674472a978b48c91c1fbfb66c0ff959f24 upstream
+
+AMD is proposing a VIRT_SPEC_CTRL MSR to handle the Speculative Store
+Bypass Disable via MSR_AMD64_LS_CFG so that guests do not have to care
+about the bit position of the SSBD bit and thus facilitate migration.
+Also, the sibling coordination on Family 17H CPUs can only be done on
+the host.
+
+Extend x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() with an
+extra argument for the VIRT_SPEC_CTRL MSR.
+
+Hand in 0 from VMX and in SVM add a new virt_spec_ctrl member to the CPU
+data structure which is going to be used in later patches for the actual
+implementation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 9 ++++++---
+ arch/x86/kernel/cpu/bugs.c | 20 ++++++++++++++++++--
+ arch/x86/kvm/svm.c | 11 +++++++++--
+ arch/x86/kvm/vmx.c | 5 +++--
+ 4 files changed, 36 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -10,10 +10,13 @@
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+- * Takes the guest view of SPEC_CTRL MSR as a parameter.
++ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
++ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+-extern void x86_spec_ctrl_set_guest(u64);
+-extern void x86_spec_ctrl_restore_host(u64);
++extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
++ u64 guest_virt_spec_ctrl);
++extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
++ u64 guest_virt_spec_ctrl);
+
+ /* AMD specific Speculative Store Bypass MSR data */
+ extern u64 x86_amd_ls_cfg_base;
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -150,7 +150,15 @@ u64 x86_spec_ctrl_get_default(void)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
+-void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
++/**
++ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ * (may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+ {
+ u64 host = x86_spec_ctrl_base;
+
+@@ -167,7 +175,15 @@ void x86_spec_ctrl_set_guest(u64 guest_s
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+-void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
++/**
++ * x86_spec_ctrl_restore_host - Restore host speculation control registers
++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ * (may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+ {
+ u64 host = x86_spec_ctrl_base;
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -185,6 +185,12 @@ struct vcpu_svm {
+ } host;
+
+ u64 spec_ctrl;
++ /*
++ * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
++ * translated into the appropriate L2_CFG bits on the host to
++ * perform speculative control.
++ */
++ u64 virt_spec_ctrl;
+
+ u32 *msrpm;
+
+@@ -1561,6 +1567,7 @@ static void svm_vcpu_reset(struct kvm_vc
+ u32 eax = 1;
+
+ svm->spec_ctrl = 0;
++ svm->virt_spec_ctrl = 0;
+
+ if (!init_event) {
+ svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
+@@ -4917,7 +4924,7 @@ static void svm_vcpu_run(struct kvm_vcpu
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- x86_spec_ctrl_set_guest(svm->spec_ctrl);
++ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+
+ asm volatile (
+ "push %%" _ASM_BP "; \n\t"
+@@ -5041,7 +5048,7 @@ static void svm_vcpu_run(struct kvm_vcpu
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- x86_spec_ctrl_restore_host(svm->spec_ctrl);
++ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+
+ reload_tss(vcpu);
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8916,9 +8916,10 @@ static void __noclone vmx_vcpu_run(struc
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- x86_spec_ctrl_set_guest(vmx->spec_ctrl);
++ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
++
+ asm(
+ /* Store host registers */
+ "push %%" _ASM_DX "; push %%" _ASM_BP ";"
+@@ -9054,7 +9055,7 @@ static void __noclone vmx_vcpu_run(struc
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- x86_spec_ctrl_restore_host(vmx->spec_ctrl);
++ x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
+
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:19 -0400
+Subject: x86/bugs, KVM: Support the combination of guest and host IBRS
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 5cf687548705412da47c9cec342fd952d71ed3d5 upstream
+
+A guest may modify the SPEC_CTRL MSR from the value used by the
+kernel. Since the kernel doesn't use IBRS, this means a value of zero is
+what is needed in the host.
+
+But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to
+the other bits as reserved, so the kernel should respect the boot-time
+SPEC_CTRL value and use that.
+
+This allows dealing with future extensions to the SPEC_CTRL interface,
+if there are any at all.
+
+Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
+difference as paravirt will overwrite the callq *0xfff.. with the wrmsrl
+assembler code.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 10 ++++++++++
+ arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++++++++
+ arch/x86/kvm/svm.c | 6 ++----
+ arch/x86/kvm/vmx.c | 6 ++----
+ 4 files changed, 32 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -228,6 +228,16 @@ enum spectre_v2_mitigation {
+ extern void x86_spec_ctrl_set(u64);
+ extern u64 x86_spec_ctrl_get_default(void);
+
++/*
++ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
++ * the guest has, while on VMEXIT we restore the host view. This
++ * would be easier if SPEC_CTRL were architecturally maskable or
++ * shadowable for guests but this is not (currently) the case.
++ * Takes the guest view of SPEC_CTRL MSR as a parameter.
++ */
++extern void x86_spec_ctrl_set_guest(u64);
++extern void x86_spec_ctrl_restore_host(u64);
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -122,6 +122,24 @@ u64 x86_spec_ctrl_get_default(void)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
++void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
++{
++ if (!boot_cpu_has(X86_FEATURE_IBRS))
++ return;
++ if (x86_spec_ctrl_base != guest_spec_ctrl)
++ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
++
++void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
++{
++ if (!boot_cpu_has(X86_FEATURE_IBRS))
++ return;
++ if (x86_spec_ctrl_base != guest_spec_ctrl)
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4917,8 +4917,7 @@ static void svm_vcpu_run(struct kvm_vcpu
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- if (svm->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
++ x86_spec_ctrl_set_guest(svm->spec_ctrl);
+
+ asm volatile (
+ "push %%" _ASM_BP "; \n\t"
+@@ -5030,8 +5029,7 @@ static void svm_vcpu_run(struct kvm_vcpu
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- if (svm->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++ x86_spec_ctrl_restore_host(svm->spec_ctrl);
+
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8916,8 +8916,7 @@ static void __noclone vmx_vcpu_run(struc
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- if (vmx->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
++ x86_spec_ctrl_set_guest(vmx->spec_ctrl);
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
+ asm(
+@@ -9055,8 +9054,7 @@ static void __noclone vmx_vcpu_run(struc
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- if (vmx->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++ x86_spec_ctrl_restore_host(vmx->spec_ctrl);
+
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 3 May 2018 15:03:30 -0700
+Subject: x86/bugs: Make boot modes __ro_after_init
+
+From: Kees Cook <keescook@chromium.org>
+
+commit f9544b2b076ca90d887c5ae5d74fab4c21bb7c13 upstream
+
+There's no reason for these to be changed after boot.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -128,7 +128,8 @@ static const char *spectre_v2_strings[]
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+
+-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
++static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
++ SPECTRE_V2_NONE;
+
+ void x86_spec_ctrl_set(u64 val)
+ {
+@@ -406,7 +407,7 @@ retpoline_auto:
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
+
+-static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
++static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
+
+ /* The kernel command line selection */
+ enum ssb_mitigation_cmd {
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Thu, 10 May 2018 22:47:32 +0200
+Subject: x86/bugs: Make cpu_show_common() static
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit 7bb4d366cba992904bffa4820d24e70a3de93e76 upstream
+
+cpu_show_common() is not used outside of arch/x86/kernel/cpu/bugs.c, so
+make it static.
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -639,7 +639,7 @@ void x86_spec_ctrl_setup_ap(void)
+
+ #ifdef CONFIG_SYSFS
+
+-ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
++static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
+ if (!boot_cpu_has_bug(bug))
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:21 -0400
+Subject: x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 24f7fc83b9204d20f878c57cb77d261ae825e033 upstream
+
+Contemporary high performance processors use a common industry-wide
+optimization known as "Speculative Store Bypass" in which loads from
+addresses to which a recent store has occurred may (speculatively) see an
+older value. Intel refers to this feature as "Memory Disambiguation" which
+is part of their "Smart Memory Access" capability.
+
+Memory Disambiguation can expose a cache side-channel attack against such
+speculatively read values. An attacker can create exploit code that allows
+them to read memory outside of a sandbox environment (for example,
+malicious JavaScript in a web page), or to perform more complex attacks
+against code running within the same privilege level, e.g. via the stack.
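+
+As a purely illustrative, simplified sketch (not taken from this patch),
+the problematic pattern is a load that may speculatively bypass an older
+store to the same location and feed a cache-visible dependent access; the
+names below are made up for the example:
+
+	static unsigned char probe[256 * 4096];
+	static volatile unsigned char sink;
+
+	/*
+	 * *slot may still hold an old pointer (e.g. to secret data). The
+	 * store below overwrites it, but a vulnerable core can forward the
+	 * stale value to the dependent load and leave a cache footprint.
+	 */
+	void victim(unsigned char **slot, unsigned char *benign)
+	{
+		*slot = benign;			/* recent store            */
+		unsigned char v = **slot;	/* may bypass the store    */
+		sink = probe[v * 4096];		/* cache side-channel step */
+	}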
+
+As a first step to mitigate against such attacks, provide two boot command
+line control knobs:
+
+ nospec_store_bypass_disable
+ spec_store_bypass_disable=[off,auto,on]
+
+By default affected x86 processors will power on with Speculative
+Store Bypass enabled. Hence the provided kernel parameters are written
+from the point of view of whether to enable a mitigation or not.
+The parameters are as follows:
+
+ - auto - Kernel detects whether your CPU model contains an implementation
+ of Speculative Store Bypass and picks the most appropriate
+ mitigation.
+
+ - on - disable Speculative Store Bypass
+ - off - enable Speculative Store Bypass
+
+[ tglx: Reordered the checks so that the whole evaluation is not done
+ when the CPU does not support RDS ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/kernel-parameters.txt | 33 +++++++++++
+ arch/x86/include/asm/cpufeatures.h | 1
+ arch/x86/include/asm/nospec-branch.h | 6 ++
+ arch/x86/kernel/cpu/bugs.c | 103 +++++++++++++++++++++++++++++++++++
+ 4 files changed, 143 insertions(+)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2699,6 +2699,9 @@ bytes respectively. Such letter suffixes
+ allow data leaks with this option, which is equivalent
+ to spectre_v2=off.
+
++ nospec_store_bypass_disable
++ [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
++
+ noxsave [BUGS=X86] Disables x86 extended register state save
+ and restore using xsave. The kernel will fallback to
+ enabling legacy floating-point and sse state.
+@@ -3973,6 +3976,36 @@ bytes respectively. Such letter suffixes
+ Not specifying this option is equivalent to
+ spectre_v2=auto.
+
++ spec_store_bypass_disable=
++ [HW] Control Speculative Store Bypass (SSB) Disable mitigation
++ (Speculative Store Bypass vulnerability)
++
++ Certain CPUs are vulnerable to an exploit against a
++ a common industry wide performance optimization known
++ as "Speculative Store Bypass" in which recent stores
++ to the same memory location may not be observed by
++ later loads during speculative execution. The idea
++ is that such stores are unlikely and that they can
++ be detected prior to instruction retirement at the
++ end of a particular speculation execution window.
++
++ In vulnerable processors, the speculatively forwarded
++ store can be used in a cache side channel attack, for
++ example to read memory to which the attacker does not
++ directly have access (e.g. inside sandboxed code).
++
++ This parameter controls whether the Speculative Store
++ Bypass optimization is used.
++
++ on - Unconditionally disable Speculative Store Bypass
++ off - Unconditionally enable Speculative Store Bypass
++ auto - Kernel detects whether the CPU model contains an
++ implementation of Speculative Store Bypass and
++ picks the most appropriate mitigation
++
++ Not specifying this option is equivalent to
++ spec_store_bypass_disable=auto.
++
+ spia_io_base= [HW,MTD]
+ spia_fio_base=
+ spia_pedr=
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -204,6 +204,7 @@
+
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
++#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -238,6 +238,12 @@ extern u64 x86_spec_ctrl_get_default(voi
+ extern void x86_spec_ctrl_set_guest(u64);
+ extern void x86_spec_ctrl_restore_host(u64);
+
++/* The Speculative Store Bypass disable variants */
++enum ssb_mitigation {
++ SPEC_STORE_BYPASS_NONE,
++ SPEC_STORE_BYPASS_DISABLE,
++};
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -26,6 +26,7 @@
+ #include <asm/intel-family.h>
+
+ static void __init spectre_v2_select_mitigation(void);
++static void __init ssb_select_mitigation(void);
+
+ /*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+@@ -52,6 +53,12 @@ void __init check_bugs(void)
+ /* Select the proper spectre mitigation before patching alternatives */
+ spectre_v2_select_mitigation();
+
++ /*
++ * Select proper mitigation for any exposure to the Speculative Store
++ * Bypass vulnerability.
++ */
++ ssb_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -357,6 +364,99 @@ retpoline_auto:
+ }
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
++
++static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
++
++/* The kernel command line selection */
++enum ssb_mitigation_cmd {
++ SPEC_STORE_BYPASS_CMD_NONE,
++ SPEC_STORE_BYPASS_CMD_AUTO,
++ SPEC_STORE_BYPASS_CMD_ON,
++};
++
++static const char *ssb_strings[] = {
++ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
++ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
++};
++
++static const struct {
++ const char *option;
++ enum ssb_mitigation_cmd cmd;
++} ssb_mitigation_options[] = {
++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++};
++
++static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
++{
++ enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
++ char arg[20];
++ int ret, i;
++
++ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
++ return SPEC_STORE_BYPASS_CMD_NONE;
++ } else {
++ ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
++ arg, sizeof(arg));
++ if (ret < 0)
++ return SPEC_STORE_BYPASS_CMD_AUTO;
++
++ for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
++ if (!match_option(arg, ret, ssb_mitigation_options[i].option))
++ continue;
++
++ cmd = ssb_mitigation_options[i].cmd;
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
++ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
++ return SPEC_STORE_BYPASS_CMD_AUTO;
++ }
++ }
++
++ return cmd;
++}
++
++static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
++{
++ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
++ enum ssb_mitigation_cmd cmd;
++
++ if (!boot_cpu_has(X86_FEATURE_RDS))
++ return mode;
++
++ cmd = ssb_parse_cmdline();
++ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
++ (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
++ cmd == SPEC_STORE_BYPASS_CMD_AUTO))
++ return mode;
++
++ switch (cmd) {
++ case SPEC_STORE_BYPASS_CMD_AUTO:
++ case SPEC_STORE_BYPASS_CMD_ON:
++ mode = SPEC_STORE_BYPASS_DISABLE;
++ break;
++ case SPEC_STORE_BYPASS_CMD_NONE:
++ break;
++ }
++
++ if (mode != SPEC_STORE_BYPASS_NONE)
++ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
++ return mode;
++}
++
++static void ssb_select_mitigation()
++{
++ ssb_mode = __ssb_select_mitigation();
++
++ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
++ pr_info("%s\n", ssb_strings[ssb_mode]);
++}
++
++#undef pr_fmt
+
+ #ifdef CONFIG_SYSFS
+
+@@ -382,6 +482,9 @@ ssize_t cpu_show_common(struct device *d
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ spectre_v2_module_string());
+
++ case X86_BUG_SPEC_STORE_BYPASS:
++ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
++
+ default:
+ break;
+ }
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:18 -0400
+Subject: x86/bugs: Read SPEC_CTRL MSR during boot and re-use reserved bits
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 1b86883ccb8d5d9506529d42dbe1a5257cb30b18 upstream
+
+The 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to all
+the other bits as reserved. The Intel SDM glossary defines reserved as
+implementation specific - aka unknown.
+
+As such, at bootup this must be taken into account and proper masking for
+the bits in use must be applied.
+
+A copy of this document is available at
+https://bugzilla.kernel.org/show_bug.cgi?id=199511
+
+[ tglx: Made x86_spec_ctrl_base __ro_after_init ]
+
+Suggested-by: Jon Masters <jcm@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 24 ++++++++++++++++++++----
+ arch/x86/kernel/cpu/bugs.c | 28 ++++++++++++++++++++++++++++
+ 2 files changed, 48 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -217,6 +217,17 @@ enum spectre_v2_mitigation {
+ SPECTRE_V2_IBRS,
+ };
+
++/*
++ * The Intel specification for the SPEC_CTRL MSR requires that we
++ * preserve any already set reserved bits at boot time (e.g. for
++ * future additions that this kernel is not currently aware of).
++ * We then set any additional mitigation bits that we want
++ * ourselves and always use this as the base for SPEC_CTRL.
++ * We also use this when handling guest entry/exit as below.
++ */
++extern void x86_spec_ctrl_set(u64);
++extern u64 x86_spec_ctrl_get_default(void);
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+@@ -254,8 +265,9 @@ void alternative_msr_write(unsigned int
+
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+- alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
+- X86_FEATURE_USE_IBPB);
++ u64 val = PRED_CMD_IBPB;
++
++ alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+ }
+
+ /*
+@@ -266,14 +278,18 @@ static inline void indirect_branch_predi
+ */
+ #define firmware_restrict_branch_speculation_start() \
+ do { \
++ u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS; \
++ \
+ preempt_disable(); \
+- alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
++ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+ } while (0)
+
+ #define firmware_restrict_branch_speculation_end() \
+ do { \
+- alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
++ u64 val = x86_spec_ctrl_get_default(); \
++ \
++ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+ preempt_enable(); \
+ } while (0)
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -27,6 +27,12 @@
+
+ static void __init spectre_v2_select_mitigation(void);
+
++/*
++ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
++ * writes to SPEC_CTRL contain whatever reserved bits have been set.
++ */
++static u64 __ro_after_init x86_spec_ctrl_base;
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -36,6 +42,13 @@ void __init check_bugs(void)
+ print_cpu_info(&boot_cpu_data);
+ }
+
++ /*
++ * Read the SPEC_CTRL MSR to account for reserved bits which may
++ * have unknown values.
++ */
++ if (boot_cpu_has(X86_FEATURE_IBRS))
++ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++
+ /* Select the proper spectre mitigation before patching alternatives */
+ spectre_v2_select_mitigation();
+
+@@ -94,6 +107,21 @@ static const char *spectre_v2_strings[]
+
+ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
++void x86_spec_ctrl_set(u64 val)
++{
++ if (val & ~SPEC_CTRL_IBRS)
++ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
++ else
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
++
++u64 x86_spec_ctrl_get_default(void)
++{
++ return x86_spec_ctrl_base;
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 12 May 2018 20:53:14 +0200
+Subject: x86/bugs: Remove x86_spec_ctrl_set()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 4b59bdb569453a60b752b274ca61f009e37f4dae upstream
+
+x86_spec_ctrl_set() is only used in bugs.c and the extra mask checks there
+provide no real value as both call sites can just write x86_spec_ctrl_base
+to MSR_SPEC_CTRL. x86_spec_ctrl_base is valid and does not need any extra
+masking or checking.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 2 --
+ arch/x86/kernel/cpu/bugs.c | 13 ++-----------
+ 2 files changed, 2 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -217,8 +217,6 @@ enum spectre_v2_mitigation {
+ SPECTRE_V2_IBRS,
+ };
+
+-extern void x86_spec_ctrl_set(u64);
+-
+ /* The Speculative Store Bypass disable variants */
+ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -132,15 +132,6 @@ static const char *spectre_v2_strings[]
+ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+ SPECTRE_V2_NONE;
+
+-void x86_spec_ctrl_set(u64 val)
+-{
+- if (val & x86_spec_ctrl_mask)
+- WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+- else
+- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+-}
+-EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
+-
+ void
+ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ {
+@@ -502,7 +493,7 @@ static enum ssb_mitigation __init __ssb_
+ case X86_VENDOR_INTEL:
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+ x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+- x86_spec_ctrl_set(SPEC_CTRL_SSBD);
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ break;
+ case X86_VENDOR_AMD:
+ x86_amd_ssb_disable();
+@@ -614,7 +605,7 @@ int arch_prctl_spec_ctrl_get(struct task
+ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+- x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+ x86_amd_ssb_disable();
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 9 May 2018 21:41:38 +0200
+Subject: x86/bugs: Rename _RDS to _SSBD
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 9f65fb29374ee37856dbad847b4e121aab72b510 upstream
+
+Intel collateral will reference the SSB mitigation bit in IA32_SPEC_CTRL[2]
+as SSBD (Speculative Store Bypass Disable).
+
+Hence changing it.
+
+It is unclear yet what the MSR_IA32_ARCH_CAPABILITIES (0x10a) Bit(4) name
+is going to be. Following the rename it would be SSBD_NO but that rolls out
+to Speculative Store Bypass Disable No.
+
+Also fixed the missing space in X86_FEATURE_AMD_SSBD.
+
+[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 4 ++--
+ arch/x86/include/asm/msr-index.h | 10 +++++-----
+ arch/x86/include/asm/spec-ctrl.h | 12 ++++++------
+ arch/x86/include/asm/thread_info.h | 6 +++---
+ arch/x86/kernel/cpu/amd.c | 14 +++++++-------
+ arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++------------------
+ arch/x86/kernel/cpu/common.c | 2 +-
+ arch/x86/kernel/cpu/intel.c | 2 +-
+ arch/x86/kernel/process.c | 8 ++++----
+ arch/x86/kvm/cpuid.c | 2 +-
+ arch/x86/kvm/cpuid.h | 2 +-
+ arch/x86/kvm/vmx.c | 2 +-
+ 12 files changed, 50 insertions(+), 50 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -205,7 +205,7 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+-#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */
++#define X86_FEATURE_AMD_SSBD (7*32+24) /* "" AMD SSBD implementation */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -308,7 +308,7 @@
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+-#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
++#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */
+
+ /*
+ * BUG word(s)
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -40,8 +40,8 @@
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+-#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
+-#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
++#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
++#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+@@ -63,10 +63,10 @@
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+ #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+-#define ARCH_CAP_RDS_NO (1 << 4) /*
++#define ARCH_CAP_SSBD_NO (1 << 4) /*
+ * Not susceptible to Speculative Store Bypass
+- * attack, so no Reduced Data Speculation control
+- * required.
++ * attack, so no Speculative Store Bypass
++ * control required.
+ */
+
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -17,20 +17,20 @@ extern void x86_spec_ctrl_restore_host(u
+
+ /* AMD specific Speculative Store Bypass MSR data */
+ extern u64 x86_amd_ls_cfg_base;
+-extern u64 x86_amd_ls_cfg_rds_mask;
++extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+ /* The Intel SPEC CTRL MSR base value cache */
+ extern u64 x86_spec_ctrl_base;
+
+-static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
++static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+ {
+- BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
+- return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
++ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
++ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+ }
+
+-static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
++static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+ {
+- return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
++ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+ }
+
+ extern void speculative_store_bypass_update(void);
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -83,7 +83,7 @@ struct thread_info {
+ #define TIF_SIGPENDING 2 /* signal pending */
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+-#define TIF_RDS 5 /* Reduced data speculation */
++#define TIF_SSBD 5 /* Reduced data speculation */
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
+@@ -107,7 +107,7 @@ struct thread_info {
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+-#define _TIF_RDS (1 << TIF_RDS)
++#define _TIF_SSBD (1 << TIF_SSBD)
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+@@ -141,7 +141,7 @@ struct thread_info {
+
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW \
+- (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
++ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -555,12 +555,12 @@ static void bsp_init_amd(struct cpuinfo_
+ }
+ /*
+ * Try to cache the base value so further operations can
+- * avoid RMW. If that faults, do not enable RDS.
++ * avoid RMW. If that faults, do not enable SSBD.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+- setup_force_cpu_cap(X86_FEATURE_RDS);
+- setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
+- x86_amd_ls_cfg_rds_mask = 1ULL << bit;
++ setup_force_cpu_cap(X86_FEATURE_SSBD);
++ setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
++ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+ }
+ }
+ }
+@@ -849,9 +849,9 @@ static void init_amd(struct cpuinfo_x86
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+- if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
+- set_cpu_cap(c, X86_FEATURE_RDS);
+- set_cpu_cap(c, X86_FEATURE_AMD_RDS);
++ if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ set_cpu_cap(c, X86_FEATURE_SSBD);
++ set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
+ }
+ }
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -44,10 +44,10 @@ static u64 __ro_after_init x86_spec_ctrl
+
+ /*
+ * AMD specific MSR info for Speculative Store Bypass control.
+- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
++ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
+ */
+ u64 __ro_after_init x86_amd_ls_cfg_base;
+-u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
++u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
+
+ void __init check_bugs(void)
+ {
+@@ -145,7 +145,7 @@ u64 x86_spec_ctrl_get_default(void)
+ u64 msrval = x86_spec_ctrl_base;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+ return msrval;
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+@@ -158,7 +158,7 @@ void x86_spec_ctrl_set_guest(u64 guest_s
+ return;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+@@ -173,18 +173,18 @@ void x86_spec_ctrl_restore_host(u64 gues
+ return;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, host);
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
+-static void x86_amd_rds_enable(void)
++static void x86_amd_ssb_disable(void)
+ {
+- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
++ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+- if (boot_cpu_has(X86_FEATURE_AMD_RDS))
++ if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
+@@ -472,7 +472,7 @@ static enum ssb_mitigation_cmd __init __
+ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+ enum ssb_mitigation_cmd cmd;
+
+- if (!boot_cpu_has(X86_FEATURE_RDS))
++ if (!boot_cpu_has(X86_FEATURE_SSBD))
+ return mode;
+
+ cmd = ssb_parse_cmdline();
+@@ -506,7 +506,7 @@ static enum ssb_mitigation_cmd __init __
+ /*
+ * We have three CPU feature flags that are in play here:
+ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+- * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
++ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+ */
+ if (mode == SPEC_STORE_BYPASS_DISABLE) {
+@@ -517,12 +517,12 @@ static enum ssb_mitigation_cmd __init __
+ */
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+- x86_spec_ctrl_base |= SPEC_CTRL_RDS;
+- x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
+- x86_spec_ctrl_set(SPEC_CTRL_RDS);
++ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
++ x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
++ x86_spec_ctrl_set(SPEC_CTRL_SSBD);
+ break;
+ case X86_VENDOR_AMD:
+- x86_amd_rds_enable();
++ x86_amd_ssb_disable();
+ break;
+ }
+ }
+@@ -555,16 +555,16 @@ static int ssb_prctl_set(struct task_str
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+ task_clear_spec_ssb_disable(task);
+- update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
++ update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_DISABLE:
+ task_set_spec_ssb_disable(task);
+- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_force_disable(task);
+- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ default:
+ return -ERANGE;
+@@ -634,7 +634,7 @@ void x86_spec_ctrl_setup_ap(void)
+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+- x86_amd_rds_enable();
++ x86_amd_ssb_disable();
+ }
+
+ #ifdef CONFIG_SYSFS
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -911,7 +911,7 @@ static void __init cpu_set_bug_bits(stru
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+- !(ia32_cap & ARCH_CAP_RDS_NO))
++ !(ia32_cap & ARCH_CAP_SSBD_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -154,7 +154,7 @@ static void early_init_intel(struct cpui
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+- setup_clear_cpu_cap(X86_FEATURE_RDS);
++ setup_clear_cpu_cap(X86_FEATURE_SSBD);
+ }
+
+ /*
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -207,11 +207,11 @@ static __always_inline void __speculativ
+ {
+ u64 msr;
+
+- if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
+- msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
++ if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ } else {
+- msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
++ msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ }
+ }
+@@ -250,7 +250,7 @@ void __switch_to_xtra(struct task_struct
+ if ((tifp ^ tifn) & _TIF_NOTSC)
+ cr4_toggle_bits(X86_CR4_TSD);
+
+- if ((tifp ^ tifn) & _TIF_RDS)
++ if ((tifp ^ tifn) & _TIF_SSBD)
+ __speculative_store_bypass_update(tifn);
+ }
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+- F(SPEC_CTRL) | F(RDS) | F(ARCH_CAPABILITIES);
++ F(SPEC_CTRL) | F(SSBD) | F(ARCH_CAPABILITIES);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -179,7 +179,7 @@ static inline bool guest_cpuid_has_spec_
+ if (best && (best->ebx & bit(X86_FEATURE_IBRS)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+- return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_RDS)));
++ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SSBD)));
+ }
+
+ static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3141,7 +3141,7 @@ static int vmx_set_msr(struct kvm_vcpu *
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
++ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+ return 1;
+
+ vmx->spec_ctrl = data;
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 16 May 2018 23:18:09 -0400
+Subject: x86/bugs: Rename SSBD_NO to SSB_NO
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 240da953fcc6a9008c92fae5b1f727ee5ed167ab upstream
+
+The "336996 Speculative Execution Side Channel Mitigations" from
+May defines this as SSB_NO, hence lets sync-up.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 2 +-
+ arch/x86/kernel/cpu/common.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -63,7 +63,7 @@
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+ #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+-#define ARCH_CAP_SSBD_NO (1 << 4) /*
++#define ARCH_CAP_SSB_NO (1 << 4) /*
+ * Not susceptible to Speculative Store Bypass
+ * attack, so no Speculative Store Bypass
+ * control required.
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -926,7 +926,7 @@ static void __init cpu_set_bug_bits(stru
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+- !(ia32_cap & ARCH_CAP_SSBD_NO))
++ !(ia32_cap & ARCH_CAP_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 12 May 2018 20:10:00 +0200
+Subject: x86/bugs: Rework spec_ctrl base and mask logic
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit be6fcb5478e95bb1c91f489121238deb3abca46a upstream
+
+x86_spec_ctrl_mask is intended to mask out bits from an MSR_SPEC_CTRL value
+which are not to be modified. However, the implementation is not really
+used and the bitmask was inverted to make a check easier; that check was
+removed in "x86/bugs: Remove x86_spec_ctrl_set()".
+
+Aside from that, it is missing the STIBP bit if that is supported by the
+platform, so if the mask were used in x86_virt_spec_ctrl() it would prevent
+a guest from setting STIBP.
+
+Add the STIBP bit if supported and use the mask in x86_virt_spec_ctrl() to
+sanitize the value which is supplied by the guest.
+
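+As an illustration of the sanitizing merge, a stand-alone user-space sketch
+(not the kernel code itself; the SPEC_CTRL_* values mirror the definitions
+used elsewhere in this series) shows how only whitelisted bits are taken
+from the guest value while everything else comes from the host base value:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define SPEC_CTRL_IBRS  (1ULL << 0)
+  #define SPEC_CTRL_STIBP (1ULL << 1)
+  #define SPEC_CTRL_SSBD  (1ULL << 2)
+
+  int main(void)
+  {
+          uint64_t mask = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
+          uint64_t hostval = SPEC_CTRL_IBRS;                        /* host base value */
+          uint64_t guest_spec_ctrl = SPEC_CTRL_STIBP | (1ULL << 7); /* guest sets an unknown bit */
+
+          /* keep non-modifiable bits from the host, modifiable bits from the guest */
+          uint64_t guestval = (hostval & ~mask) | (guest_spec_ctrl & mask);
+
+          printf("0x%llx\n", (unsigned long long)guestval);         /* 0x2: unknown bit dropped */
+          return 0;
+  }
+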
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+ * The vendor and possibly platform specific bits which can be modified in
+ * x86_spec_ctrl_base.
+ */
+-static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
++static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+
+ /*
+ * AMD specific MSR info for Speculative Store Bypass control.
+@@ -67,6 +67,10 @@ void __init check_bugs(void)
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
++ /* Allow STIBP in MSR_SPEC_CTRL if supported */
++ if (boot_cpu_has(X86_FEATURE_STIBP))
++ x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
++
+ /* Select the proper spectre mitigation before patching alternatives */
+ spectre_v2_select_mitigation();
+
+@@ -135,18 +139,26 @@ static enum spectre_v2_mitigation spectr
+ void
+ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ {
++ u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+ struct thread_info *ti = current_thread_info();
+- u64 msr, host = x86_spec_ctrl_base;
+
+ /* Is MSR_SPEC_CTRL implemented ? */
+ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
++ /*
++ * Restrict guest_spec_ctrl to supported values. Clear the
++ * modifiable bits in the host base value and or the
++ * modifiable bits from the guest value.
++ */
++ guestval = hostval & ~x86_spec_ctrl_mask;
++ guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
++
+ /* SSBD controlled in MSR_SPEC_CTRL */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+- host |= ssbd_tif_to_spec_ctrl(ti->flags);
++ hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+- if (host != guest_spec_ctrl) {
+- msr = setguest ? guest_spec_ctrl : host;
+- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++ if (hostval != guestval) {
++ msrval = setguest ? guestval : hostval;
++ wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+ }
+ }
+ }
+@@ -492,7 +504,7 @@ static enum ssb_mitigation __init __ssb_
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+- x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
++ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ break;
+ case X86_VENDOR_AMD:
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Borislav Petkov <bp@suse.de>
+Date: Sat, 12 May 2018 00:14:51 +0200
+Subject: x86/bugs: Unify x86_spec_ctrl_{set_guest,restore_host}
+
+From: Borislav Petkov <bp@suse.de>
+
+commit cc69b34989210f067b2c51d5539b5f96ebcc3a01 upstream
+
+The function bodies are very similar and are going to grow more
+almost-identical code. Add a bool argument to determine whether SPEC_CTRL
+is being set for the guest or restored to the host.
+
+No functional changes.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 33 ++++++++++++++++++---
+ arch/x86/kernel/cpu/bugs.c | 60 +++++++++------------------------------
+ 2 files changed, 44 insertions(+), 49 deletions(-)
+
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -13,10 +13,35 @@
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+-extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
+- u64 guest_virt_spec_ctrl);
+-extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
+- u64 guest_virt_spec_ctrl);
++extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
++
++/**
++ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ * (may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++static inline
++void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
++{
++ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
++}
++
++/**
++ * x86_spec_ctrl_restore_host - Restore host speculation control registers
++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ * (may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++static inline
++void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
++{
++ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
++}
+
+ /* AMD specific Speculative Store Bypass MSR data */
+ extern u64 x86_amd_ls_cfg_base;
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -150,55 +150,25 @@ u64 x86_spec_ctrl_get_default(void)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
+-/**
+- * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+- * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
+- * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
+- * (may get translated to MSR_AMD64_LS_CFG bits)
+- *
+- * Avoids writing to the MSR if the content/bits are the same
+- */
+-void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
++void
++x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ {
+- u64 host = x86_spec_ctrl_base;
++ struct thread_info *ti = current_thread_info();
++ u64 msr, host = x86_spec_ctrl_base;
+
+ /* Is MSR_SPEC_CTRL implemented ? */
+- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+- return;
+-
+- /* SSBD controlled in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+-
+- if (host != guest_spec_ctrl)
+- wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+-}
+-EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+-
+-/**
+- * x86_spec_ctrl_restore_host - Restore host speculation control registers
+- * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
+- * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
+- * (may get translated to MSR_AMD64_LS_CFG bits)
+- *
+- * Avoids writing to the MSR if the content/bits are the same
+- */
+-void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+-{
+- u64 host = x86_spec_ctrl_base;
+-
+- /* Is MSR_SPEC_CTRL implemented ? */
+- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+- return;
+-
+- /* SSBD controlled in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+-
+- if (host != guest_spec_ctrl)
+- wrmsrl(MSR_IA32_SPEC_CTRL, host);
++ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
++ /* SSBD controlled in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
++ host |= ssbd_tif_to_spec_ctrl(ti->flags);
++
++ if (host != guest_spec_ctrl) {
++ msr = setguest ? guest_spec_ctrl : host;
++ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++ }
++ }
+ }
+-EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
++EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+
+ static void x86_amd_ssb_disable(void)
+ {
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:23 -0400
+Subject: x86/bugs: Whitelist allowed SPEC_CTRL MSR values
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 1115a859f33276fe8afb31c60cf9d8e657872558 upstream
+
+The Intel and AMD SPEC_CTRL (0x48) MSR semantics may differ in the future
+(or the vendors may in fact use different MSRs for the same functionality).
+
+As such, a run-time mechanism is required to whitelist the appropriate MSR
+values.
+
+[ tglx: Made the variable __ro_after_init ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -34,6 +34,12 @@ static void __init ssb_select_mitigation
+ */
+ static u64 __ro_after_init x86_spec_ctrl_base;
+
++/*
++ * The vendor and possibly platform specific bits which can be modified in
++ * x86_spec_ctrl_base.
++ */
++static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -116,7 +122,7 @@ static enum spectre_v2_mitigation spectr
+
+ void x86_spec_ctrl_set(u64 val)
+ {
+- if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
++ if (val & x86_spec_ctrl_mask)
+ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+ else
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+@@ -458,6 +464,7 @@ static enum ssb_mitigation_cmd __init __
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
++ x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
+ x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ break;
+ case X86_VENDOR_AMD:
+@@ -481,7 +488,7 @@ static void ssb_select_mitigation()
+ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+- x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
++ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+ }
+
+ #ifdef CONFIG_SYSFS
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Borislav Petkov <bp@suse.de>
+Date: Thu, 7 Sep 2017 19:08:21 +0200
+Subject: x86/cpu/AMD: Fix erratum 1076 (CPB bit)
+
+From: Borislav Petkov <bp@suse.de>
+
+commit f7f3dc00f61261cdc9ccd8b886f21bc4dffd6fd9 upstream
+
+CPUID Fn8000_0007_EDX[CPB] is wrongly 0 on models up to B1. But they do
+support CPB (AMD's Core Performance Boosting cpufreq CPU feature), so fix that.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sherry Hurwitz <sherry.hurwitz@amd.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20170907170821.16021-1-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -749,6 +749,16 @@ static void init_amd_bd(struct cpuinfo_x
+ }
+ }
+
++static void init_amd_zn(struct cpuinfo_x86 *c)
++{
++ /*
++ * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
++ * all up to and including B1.
++ */
++ if (c->x86_model <= 1 && c->x86_stepping <= 1)
++ set_cpu_cap(c, X86_FEATURE_CPB);
++}
++
+ static void init_amd(struct cpuinfo_x86 *c)
+ {
+ u32 dummy;
+@@ -779,6 +789,7 @@ static void init_amd(struct cpuinfo_x86
+ case 0x10: init_amd_gh(c); break;
+ case 0x12: init_amd_ln(c); break;
+ case 0x15: init_amd_bd(c); break;
++ case 0x17: init_amd_zn(c); break;
+ }
+
+ /* Enable workaround for FXSAVE leak */
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Jim Mattson <jmattson@google.com>
+Date: Sun, 13 May 2018 17:33:57 -0400
+Subject: x86/cpu: Make alternative_msr_write work for 32-bit code
+
+From: Jim Mattson <jmattson@google.com>
+
+commit 5f2b745f5e1304f438f9b2cd03ebc8120b6e0d3b upstream
+
+Cast val and (val >> 32) to (u32), so that they fit in a
+general-purpose register in both 32-bit and 64-bit code.
+
+[ tglx: Made it u32 instead of uintptr_t ]
+
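+As a hedged illustration (plain user-space C, not the kernel code): WRMSR
+takes the low 32 bits of the value in EAX and the high 32 bits in EDX, so
+both asm operands have to be 32-bit quantities for the register allocation
+to work in 32-bit code as well:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          uint64_t val = 0x1122334455667788ULL;
+          uint32_t lo = (uint32_t)val;             /* what ends up in EAX */
+          uint32_t hi = (uint32_t)(val >> 32);     /* what ends up in EDX */
+
+          printf("lo=0x%08x hi=0x%08x\n", lo, hi); /* lo=0x55667788 hi=0x11223344 */
+          return 0;
+  }
+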
+Fixes: c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -265,8 +265,8 @@ void alternative_msr_write(unsigned int
+ {
+ asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+ : : "c" (msr),
+- "a" (val),
+- "d" (val >> 32),
++ "a" ((u32)val),
++ "d" ((u32)(val >> 32)),
+ [feature] "i" (feature)
+ : "memory");
+ }
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 16:26:00 +0200
+Subject: x86/cpufeatures: Add FEATURE_ZEN
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit d1035d971829dcf80e8686ccde26f94b0a069472 upstream
+
+Add a ZEN feature bit so family-dependent static_cpu_has() optimizations
+can be built for ZEN.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 ++
+ arch/x86/kernel/cpu/amd.c | 1 +
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -212,6 +212,8 @@
+ #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
++
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -751,6 +751,7 @@ static void init_amd_bd(struct cpuinfo_x
+
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+ {
++ set_cpu_cap(c, X86_FEATURE_ZEN);
+ /*
+ * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+ * all up to and including B1.
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Sat, 28 Apr 2018 22:34:17 +0200
+Subject: x86/cpufeatures: Add X86_FEATURE_RDS
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 0cc5fa00b0a88dad140b4e5c2cead9951ad36822 upstream
+
+Add the CPU feature bit CPUID.7.0.EDX[31] which indicates whether the CPU
+supports Reduced Data Speculation.
+
+[ tglx: Split it out from a later patch ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -306,6 +306,7 @@
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
++#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
+
+ /*
+ * BUG word(s)
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 19:13:18 +0200
+Subject: x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration from IBRS
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 7eb8956a7fec3c1f0abc2a5517dada99ccc8a961 upstream
+
+The availability of the SPEC_CTRL MSR is enumerated by a CPUID bit on
+Intel and implied by IBRS or STIBP support on AMD. That's just confusing,
+and in case an AMD CPU does not support IBRS because the underlying
+problem has been fixed, but has another valid bit in the SPEC_CTRL MSR,
+the whole thing falls apart.
+
+Add a synthetic feature bit X86_FEATURE_MSR_SPEC_CTRL to denote the
+availability on both Intel and AMD.
+
+While at it replace the boot_cpu_has() checks with static_cpu_has() where
+possible. This prevents late microcode loading from exposing SPEC_CTRL, but
+late loading is already very limited as it does not reevaluate the
+mitigation options and other bits and pieces. Having static_cpu_has() is
+the simplest and least fragile solution.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 ++
+ arch/x86/kernel/cpu/bugs.c | 18 +++++++++++-------
+ arch/x86/kernel/cpu/common.c | 9 +++++++--
+ arch/x86/kernel/cpu/intel.c | 1 +
+ 4 files changed, 21 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -197,6 +197,8 @@
+ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
++#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
++
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+ /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -63,7 +63,7 @@ void __init check_bugs(void)
+ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+ * init code as it is not enumerated and depends on the family.
+ */
+- if (boot_cpu_has(X86_FEATURE_IBRS))
++ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+ /* Select the proper spectre mitigation before patching alternatives */
+@@ -144,7 +144,7 @@ u64 x86_spec_ctrl_get_default(void)
+ {
+ u64 msrval = x86_spec_ctrl_base;
+
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+ msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+ return msrval;
+ }
+@@ -154,10 +154,12 @@ void x86_spec_ctrl_set_guest(u64 guest_s
+ {
+ u64 host = x86_spec_ctrl_base;
+
+- if (!boot_cpu_has(X86_FEATURE_IBRS))
++ /* Is MSR_SPEC_CTRL implemented ? */
++ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ /* Intel controls SSB in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -169,10 +171,12 @@ void x86_spec_ctrl_restore_host(u64 gues
+ {
+ u64 host = x86_spec_ctrl_base;
+
+- if (!boot_cpu_has(X86_FEATURE_IBRS))
++ /* Is MSR_SPEC_CTRL implemented ? */
++ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ /* Intel controls SSB in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -630,7 +634,7 @@ int arch_prctl_spec_ctrl_get(struct task
+
+ void x86_spec_ctrl_setup_ap(void)
+ {
+- if (boot_cpu_has(X86_FEATURE_IBRS))
++ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -729,19 +729,24 @@ static void init_speculation_control(str
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_IBPB);
++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }
+
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
+- if (cpu_has(c, X86_FEATURE_AMD_IBRS))
++ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
++ }
+
+ if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+
+- if (cpu_has(c, X86_FEATURE_AMD_STIBP))
++ if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+ set_cpu_cap(c, X86_FEATURE_STIBP);
++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
++ }
+ }
+
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -153,6 +153,7 @@ static void early_init_intel(struct cpui
+ setup_clear_cpu_cap(X86_FEATURE_IBPB);
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
++ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SSBD);
+ }
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 20:21:36 +0200
+Subject: x86/cpufeatures: Disentangle SSBD enumeration
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 52817587e706686fcdb27f14c1b000c92f266c96 upstream
+
+The SSBD enumeration is, similarly to the other bits, magically shared
+between Intel and AMD, though the mechanisms are different.
+
+Make X86_FEATURE_SSBD synthetic and set it depending on the vendor specific
+features or family dependent setup.
+
+Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is
+controlled via MSR_SPEC_CTRL and fix up the usage sites.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 5 +++--
+ arch/x86/kernel/cpu/amd.c | 7 +------
+ arch/x86/kernel/cpu/bugs.c | 10 +++++-----
+ arch/x86/kernel/cpu/common.c | 3 +++
+ arch/x86/kernel/cpu/intel.c | 1 +
+ arch/x86/kernel/process.c | 2 +-
+ 6 files changed, 14 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -198,6 +198,7 @@
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
+ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
++#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+@@ -207,7 +208,7 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+-#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
++#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
+ #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+@@ -314,7 +315,7 @@
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+-#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */
++#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
+ /*
+ * BUG word(s)
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -558,8 +558,8 @@ static void bsp_init_amd(struct cpuinfo_
+ * avoid RMW. If that faults, do not enable SSBD.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
++ setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+ setup_force_cpu_cap(X86_FEATURE_SSBD);
+- setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+ }
+ }
+@@ -848,11 +848,6 @@ static void init_amd(struct cpuinfo_x86
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+-
+- if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+- set_cpu_cap(c, X86_FEATURE_SSBD);
+- set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
+- }
+ }
+
+ #ifdef CONFIG_X86_32
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -158,8 +158,8 @@ void x86_spec_ctrl_set_guest(u64 guest_s
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- /* Intel controls SSB in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
++ /* SSBD controlled in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -175,8 +175,8 @@ void x86_spec_ctrl_restore_host(u64 gues
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+- /* Intel controls SSB in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
++ /* SSBD controlled in MSR_SPEC_CTRL */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+@@ -188,7 +188,7 @@ static void x86_amd_ssb_disable(void)
+ {
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+- if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
++ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -735,6 +735,9 @@ static void init_speculation_control(str
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
++ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
++ set_cpu_cap(c, X86_FEATURE_SSBD);
++
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -156,6 +156,7 @@ static void early_init_intel(struct cpui
+ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SSBD);
++ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
+ }
+
+ /*
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -207,7 +207,7 @@ static __always_inline void __speculativ
+ {
+ u64 msr;
+
+- if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ } else {
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:25 -0400
+Subject: x86/KVM/VMX: Expose SPEC_CTRL Bit(2) to the guest
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit da39556f66f5cfe8f9c989206974f1cb16ca5d7c upstream
+
+Expose the CPUID.7.EDX[31] bit to the guest, and also guard against various
+combinations of SPEC_CTRL MSR values.
+
+The handling of the MSR (to take into account the host value of SPEC_CTRL
+Bit(2)) is taken care of in patch:
+
+ KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+
+[dwmw2: Handle 4.9 guest CPUID differences, rename
+ guest_cpu_has_ibrs() → guest_cpu_has_spec_ctrl()]
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c | 2 +-
+ arch/x86/kvm/cpuid.h | 4 ++--
+ arch/x86/kvm/svm.c | 4 ++--
+ arch/x86/kvm/vmx.c | 6 +++---
+ 4 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+- F(SPEC_CTRL) | F(ARCH_CAPABILITIES);
++ F(SPEC_CTRL) | F(RDS) | F(ARCH_CAPABILITIES);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -171,7 +171,7 @@ static inline bool guest_cpuid_has_ibpb(
+ return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+ }
+
+-static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu)
++static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_cpuid_entry2 *best;
+
+@@ -179,7 +179,7 @@ static inline bool guest_cpuid_has_ibrs(
+ if (best && (best->ebx & bit(X86_FEATURE_IBRS)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+- return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
++ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_RDS)));
+ }
+
+ static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3545,7 +3545,7 @@ static int svm_get_msr(struct kvm_vcpu *
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+- !guest_cpuid_has_ibrs(vcpu))
++ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ msr_info->data = svm->spec_ctrl;
+@@ -3643,7 +3643,7 @@ static int svm_set_msr(struct kvm_vcpu *
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr->host_initiated &&
+- !guest_cpuid_has_ibrs(vcpu))
++ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3020,7 +3020,7 @@ static int vmx_get_msr(struct kvm_vcpu *
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+- !guest_cpuid_has_ibrs(vcpu))
++ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ msr_info->data = to_vmx(vcpu)->spec_ctrl;
+@@ -3137,11 +3137,11 @@ static int vmx_set_msr(struct kvm_vcpu *
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+- !guest_cpuid_has_ibrs(vcpu))
++ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
++ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
+ return 1;
+
+ vmx->spec_ctrl = data;
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Tue, 1 May 2018 15:55:51 +0200
+Subject: x86/nospec: Simplify alternative_msr_write()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 1aa7a5735a41418d8e01fa7c9565eb2657e2ea3f upstream
+
+The macro is not type safe and I did look for why that "g" constraint for
+the asm doesn't work: it's because the asm is more fundamentally wrong.
+
+It does
+
+ movl %[val], %%eax
+
+but "val" isn't a 32-bit value, so then gcc will pass it in a register,
+and generate code like
+
+ movl %rsi, %eax
+
+and gas will complain about a nonsensical 'mov' instruction (it's moving a
+64-bit register to a 32-bit one).
+
+Passing it through memory will just hide the real bug - gcc still thinks
+the memory location is 64-bit, but the "movl" will only load the first 32
+bits and it all happens to work because x86 is little-endian.
+
+Convert it to a type safe inline function with a little trick which hands
+the feature into the ALTERNATIVE macro.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -241,15 +241,16 @@ static inline void vmexit_fill_RSB(void)
+ #endif
+ }
+
+-#define alternative_msr_write(_msr, _val, _feature) \
+- asm volatile(ALTERNATIVE("", \
+- "movl %[msr], %%ecx\n\t" \
+- "movl %[val], %%eax\n\t" \
+- "movl $0, %%edx\n\t" \
+- "wrmsr", \
+- _feature) \
+- : : [msr] "i" (_msr), [val] "i" (_val) \
+- : "eax", "ecx", "edx", "memory")
++static __always_inline
++void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
++{
++ asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
++ : : "c" (msr),
++ "a" (val),
++ "d" (val >> 32),
++ [feature] "i" (feature)
++ : "memory");
++}
+
+ static inline void indirect_branch_prediction_barrier(void)
+ {
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:21:42 +0200
+Subject: x86/process: Allow runtime control of Speculative Store Bypass
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 885f82bfbc6fefb6664ea27965c3ab9ac4194b8c upstream
+
+The Speculative Store Bypass vulnerability can be mitigated with the
+Reduced Data Speculation (RDS) feature. To allow finer-grained control of
+this potentially expensive mitigation, a per-task mitigation control is
+required.
+
+Add a new TIF_RDS flag and put it into the group of TIF flags which are
+evaluated for mismatch in switch_to(). If these bits differ in the previous
+and the next task, then the slow path function __switch_to_xtra() is
+invoked. Implement the TIF_RDS dependent mitigation control in the slow
+path.
+
+If the prctl for controlling Speculative Store Bypass is disabled or no
+task uses the prctl then there is no overhead in the switch_to() fast
+path.
+
+Update the KVM-related speculation control functions to take TIF_RDS into
+account as well.
+
+Based on a patch from Tim Chen. Completely rewritten.
+
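+As a stand-alone sketch of the TIF-to-MSR bit mapping (user-space C, using
+the TIF_RDS = 5 and SPEC_CTRL_RDS_SHIFT = 2 values introduced below): the
+helper relocates the thread-flag bit to the SPEC_CTRL bit with one shift,
+avoiding a conditional:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define SPEC_CTRL_RDS_SHIFT 2                /* RDS bit in MSR_IA32_SPEC_CTRL */
+  #define TIF_RDS             5                /* RDS bit in the thread flags */
+  #define _TIF_RDS            (1UL << TIF_RDS)
+
+  static uint64_t rds_tif_to_spec_ctrl(uint64_t tifn)
+  {
+          /* move thread-flag bit 5 down to MSR bit 2 */
+          return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
+  }
+
+  int main(void)
+  {
+          printf("0x%llx\n", (unsigned long long)rds_tif_to_spec_ctrl(_TIF_RDS)); /* 0x4 == SPEC_CTRL_RDS */
+          printf("0x%llx\n", (unsigned long long)rds_tif_to_spec_ctrl(0));        /* 0x0 */
+          return 0;
+  }
+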
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 3 ++-
+ arch/x86/include/asm/spec-ctrl.h | 17 +++++++++++++++++
+ arch/x86/include/asm/thread_info.h | 6 ++++--
+ arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++++-----
+ arch/x86/kernel/process.c | 22 ++++++++++++++++++++++
+ 5 files changed, 66 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -40,7 +40,8 @@
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+-#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
++#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
++#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_X86_SPECCTRL_H_
+ #define _ASM_X86_SPECCTRL_H_
+
++#include <linux/thread_info.h>
+ #include <asm/nospec-branch.h>
+
+ /*
+@@ -18,4 +19,20 @@ extern void x86_spec_ctrl_restore_host(u
+ extern u64 x86_amd_ls_cfg_base;
+ extern u64 x86_amd_ls_cfg_rds_mask;
+
++/* The Intel SPEC CTRL MSR base value cache */
++extern u64 x86_spec_ctrl_base;
++
++static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
++{
++ BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
++ return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
++}
++
++static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
++{
++ return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
++}
++
++extern void speculative_store_bypass_update(void);
++
+ #endif
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -83,6 +83,7 @@ struct thread_info {
+ #define TIF_SIGPENDING 2 /* signal pending */
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
++#define TIF_RDS 5 /* Reduced data speculation */
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
+@@ -104,8 +105,9 @@ struct thread_info {
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+-#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
++#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
++#define _TIF_RDS (1 << TIF_RDS)
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+@@ -139,7 +141,7 @@ struct thread_info {
+
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW \
+- (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
++ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
+
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -32,7 +32,7 @@ static void __init ssb_select_mitigation
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+-static u64 __ro_after_init x86_spec_ctrl_base;
++u64 __ro_after_init x86_spec_ctrl_base;
+
+ /*
+ * The vendor and possibly platform specific bits which can be modified in
+@@ -139,25 +139,41 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
+
+ u64 x86_spec_ctrl_get_default(void)
+ {
+- return x86_spec_ctrl_base;
++ u64 msrval = x86_spec_ctrl_base;
++
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ return msrval;
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
+ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+ {
++ u64 host = x86_spec_ctrl_base;
++
+ if (!boot_cpu_has(X86_FEATURE_IBRS))
+ return;
+- if (x86_spec_ctrl_base != guest_spec_ctrl)
++
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++
++ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+ {
++ u64 host = x86_spec_ctrl_base;
++
+ if (!boot_cpu_has(X86_FEATURE_IBRS))
+ return;
+- if (x86_spec_ctrl_base != guest_spec_ctrl)
+- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++
++ if (host != guest_spec_ctrl)
++ wrmsrl(MSR_IA32_SPEC_CTRL, host);
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -33,6 +33,7 @@
+ #include <asm/mce.h>
+ #include <asm/vm86.h>
+ #include <asm/switch_to.h>
++#include <asm/spec-ctrl.h>
+
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+@@ -202,6 +203,24 @@ static inline void switch_to_bitmap(stru
+ }
+ }
+
++static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
++{
++ u64 msr;
++
++ if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
++ msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
++ wrmsrl(MSR_AMD64_LS_CFG, msr);
++ } else {
++ msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
++ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++ }
++}
++
++void speculative_store_bypass_update(void)
++{
++ __speculative_store_bypass_update(current_thread_info()->flags);
++}
++
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ struct tss_struct *tss)
+ {
+@@ -230,6 +249,9 @@ void __switch_to_xtra(struct task_struct
+
+ if ((tifp ^ tifn) & _TIF_NOTSC)
+ cr4_toggle_bits(X86_CR4_TSD);
++
++ if ((tifp ^ tifn) & _TIF_RDS)
++ __speculative_store_bypass_update(tifn);
+ }
+
+ /*
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 14 Feb 2017 00:11:03 -0800
+Subject: x86/process: Correct and optimize TIF_BLOCKSTEP switch
+
+From: Kyle Huey <me@kylehuey.com>
+
+commit b9894a2f5bd18b1691cb6872c9afe32b148d0132 upstream
+
+The debug control MSR is "highly magical" as the blockstep bit can be
+cleared by hardware under not well documented circumstances.
+
+So a task switch relying on the bit set by the previous task (according to
+the previous task's thread flags) can trip over this and not update the flag
+for the next task.
+
+To fix this, it is required to handle DEBUGCTLMSR_BTF when either the
+previous or the next or both tasks have the TIF_BLOCKSTEP flag set.
+
+While at it avoid branching within the TIF_BLOCKSTEP case and evaluating
+boot_cpu_data twice in kernels without CONFIG_X86_DEBUGCTLMSR.
+
+x86_64: arch/x86/kernel/process.o
+text data bss dec hex
+3024 8577 16 11617 2d61 Before
+3008 8577 16 11601 2d51 After
+
+i386: No change
+
+[ tglx: Made the shift value explicit, used a local variable to make the
+code readable and massaged the changelog ]
+
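+As a hedged stand-alone sketch of the branchless update (user-space C; the
+TIF_BLOCKSTEP bit number is illustrative here, only DEBUGCTLMSR_BTF_SHIFT = 1
+comes from this patch): shifting the flag bit down to bit 0 and back up to
+the BTF position replaces the conditional OR:
+
+  #include <stdio.h>
+
+  #define TIF_BLOCKSTEP         25              /* illustrative bit number */
+  #define _TIF_BLOCKSTEP        (1UL << TIF_BLOCKSTEP)
+  #define DEBUGCTLMSR_BTF_SHIFT 1
+
+  int main(void)
+  {
+          unsigned long tifn = _TIF_BLOCKSTEP;   /* next task wants blockstep */
+          unsigned long debugctl = 0;
+
+          unsigned long msk = tifn & _TIF_BLOCKSTEP;
+          debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
+
+          printf("debugctl=0x%lx\n", debugctl);  /* 0x2 == DEBUGCTLMSR_BTF */
+          return 0;
+  }
+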
+Originally-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Kyle Huey <khuey@kylehuey.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: http://lkml.kernel.org/r/20170214081104.9244-3-khuey@kylehuey.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 1 +
+ arch/x86/kernel/process.c | 12 +++++++-----
+ 2 files changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -141,6 +141,7 @@
+
+ /* DEBUGCTLMSR bits (others vary by model): */
+ #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
++#define DEBUGCTLMSR_BTF_SHIFT 1
+ #define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
+ #define DEBUGCTLMSR_TR (1UL << 6)
+ #define DEBUGCTLMSR_BTS (1UL << 7)
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -227,13 +227,15 @@ void __switch_to_xtra(struct task_struct
+
+ propagate_user_return_notify(prev_p, next_p);
+
+- if ((tifp ^ tifn) & _TIF_BLOCKSTEP) {
+- unsigned long debugctl = get_debugctlmsr();
++ if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
++ arch_has_block_step()) {
++ unsigned long debugctl, msk;
+
++ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ debugctl &= ~DEBUGCTLMSR_BTF;
+- if (tifn & _TIF_BLOCKSTEP)
+- debugctl |= DEBUGCTLMSR_BTF;
+- update_debugctlmsr(debugctl);
++ msk = tifn & _TIF_BLOCKSTEP;
++ debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
++ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ }
+
+ if ((tifp ^ tifn) & _TIF_NOTSC) {
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Kyle Huey <me@kylehuey.com>
+Date: Tue, 14 Feb 2017 00:11:02 -0800
+Subject: x86/process: Optimize TIF checks in __switch_to_xtra()
+
+From: Kyle Huey <me@kylehuey.com>
+
+commit af8b3cd3934ec60f4c2a420d19a9d416554f140b upstream
+
+Help the compiler to avoid reevaluating the thread flags for each checked
+bit by reordering the bit checks and providing an explicit xor for
+evaluation.
+
+With default defconfigs for each arch,
+
+x86_64: arch/x86/kernel/process.o
+text data bss dec hex
+3056 8577 16 11649 2d81 Before
+3024 8577 16 11617 2d61 After
+
+i386: arch/x86/kernel/process.o
+text data bss dec hex
+2957 8673 8 11638 2d76 Before
+2925 8673 8 11606 2d56 After
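+
+A sketch of the pattern in plain user-space C (illustrative bit positions,
+not the kernel code itself): the XOR of the two flag words exposes exactly
+the bits on which prev and next disagree, so each word is read only once.
+
+  #include <stdio.h>
+
+  #define _TIF_BLOCKSTEP (1UL << 25)    /* illustrative bit positions */
+  #define _TIF_NOTSC     (1UL << 16)
+
+  static void switch_to_xtra_model(unsigned long tifp, unsigned long tifn)
+  {
+      /* A set bit in (tifp ^ tifn) means prev and next disagree on it. */
+      if ((tifp ^ tifn) & _TIF_BLOCKSTEP)
+          printf("BLOCKSTEP changed -> reprogram DEBUGCTLMSR\n");
+
+      if ((tifp ^ tifn) & _TIF_NOTSC)
+          printf("NOTSC changed -> flip CR4.TSD\n");
+  }
+
+  int main(void)
+  {
+      switch_to_xtra_model(_TIF_NOTSC, 0);                  /* only NOTSC differs */
+      switch_to_xtra_model(_TIF_BLOCKSTEP, _TIF_BLOCKSTEP); /* no change */
+      return 0;
+  }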
+
+Originally-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Kyle Huey <khuey@kylehuey.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: http://lkml.kernel.org/r/20170214081104.9244-2-khuey@kylehuey.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+[dwmw2: backported to make TIF_RDS handling simpler.
+ No deferred TR reload.]
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/process.c | 54 ++++++++++++++++++++++++++--------------------
+ 1 file changed, 31 insertions(+), 23 deletions(-)
+
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -192,48 +192,56 @@ int set_tsc_mode(unsigned int val)
+ return 0;
+ }
+
++static inline void switch_to_bitmap(struct tss_struct *tss,
++ struct thread_struct *prev,
++ struct thread_struct *next,
++ unsigned long tifp, unsigned long tifn)
++{
++ if (tifn & _TIF_IO_BITMAP) {
++ /*
++ * Copy the relevant range of the IO bitmap.
++ * Normally this is 128 bytes or less:
++ */
++ memcpy(tss->io_bitmap, next->io_bitmap_ptr,
++ max(prev->io_bitmap_max, next->io_bitmap_max));
++ } else if (tifp & _TIF_IO_BITMAP) {
++ /*
++ * Clear any possible leftover bits:
++ */
++ memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
++ }
++}
++
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ struct tss_struct *tss)
+ {
+ struct thread_struct *prev, *next;
++ unsigned long tifp, tifn;
+
+ prev = &prev_p->thread;
+ next = &next_p->thread;
+
+- if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
+- test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
++ tifn = READ_ONCE(task_thread_info(next_p)->flags);
++ tifp = READ_ONCE(task_thread_info(prev_p)->flags);
++ switch_to_bitmap(tss, prev, next, tifp, tifn);
++
++ propagate_user_return_notify(prev_p, next_p);
++
++ if ((tifp ^ tifn) & _TIF_BLOCKSTEP) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl &= ~DEBUGCTLMSR_BTF;
+- if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
++ if (tifn & _TIF_BLOCKSTEP)
+ debugctl |= DEBUGCTLMSR_BTF;
+-
+ update_debugctlmsr(debugctl);
+ }
+
+- if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+- test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+- /* prev and next are different */
+- if (test_tsk_thread_flag(next_p, TIF_NOTSC))
++ if ((tifp ^ tifn) & _TIF_NOTSC) {
++ if (tifn & _TIF_NOTSC)
+ hard_disable_TSC();
+ else
+ hard_enable_TSC();
+ }
+-
+- if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+- /*
+- * Copy the relevant range of the IO bitmap.
+- * Normally this is 128 bytes or less:
+- */
+- memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+- max(prev->io_bitmap_max, next->io_bitmap_max));
+- } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+- /*
+- * Clear any possible leftover bits:
+- */
+- memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+- }
+- propagate_user_return_notify(prev_p, next_p);
+ }
+
+ /*
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 14 Feb 2017 00:11:04 -0800
+Subject: x86/process: Optimize TIF_NOTSC switch
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 5a920155e388ec22a22e0532fb695b9215c9b34d upstream
+
+Provide and use a toggle helper instead of doing it with a branch.
+
+x86_64: arch/x86/kernel/process.o
+text data bss dec hex
+3008 8577 16 11601 2d51 Before
+2976 8577 16 11569 2d31 After
+
+i386: arch/x86/kernel/process.o
+text data bss dec hex
+2925 8673 8 11606 2d56 Before
+2893 8673 8 11574 2d36 After
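+
+A minimal standalone sketch of the toggle idea, with a plain variable
+standing in for the CR4 shadow (illustrative only, not the kernel helper):
+
+  #include <stdio.h>
+
+  #define X86_CR4_TSD (1UL << 2)
+
+  static unsigned long shadow_cr4;
+
+  static void cr4_toggle_bits_model(unsigned long mask)
+  {
+      shadow_cr4 ^= mask;     /* flip the requested bits, no branch needed */
+      /* the real helper would also do: __write_cr4(shadow_cr4); */
+  }
+
+  int main(void)
+  {
+      cr4_toggle_bits_model(X86_CR4_TSD);
+      printf("after 1st toggle: %#lx\n", shadow_cr4);  /* TSD set */
+      cr4_toggle_bits_model(X86_CR4_TSD);
+      printf("after 2nd toggle: %#lx\n", shadow_cr4);  /* TSD clear */
+      return 0;
+  }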
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: http://lkml.kernel.org/r/20170214081104.9244-4-khuey@kylehuey.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/tlbflush.h | 10 ++++++++++
+ arch/x86/kernel/process.c | 22 ++++------------------
+ 2 files changed, 14 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -111,6 +111,16 @@ static inline void cr4_clear_bits(unsign
+ }
+ }
+
++static inline void cr4_toggle_bits(unsigned long mask)
++{
++ unsigned long cr4;
++
++ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ cr4 ^= mask;
++ this_cpu_write(cpu_tlbstate.cr4, cr4);
++ __write_cr4(cr4);
++}
++
+ /* Read the CR4 shadow. */
+ static inline unsigned long cr4_read_shadow(void)
+ {
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -134,11 +134,6 @@ void flush_thread(void)
+ fpu__clear(&tsk->thread.fpu);
+ }
+
+-static void hard_disable_TSC(void)
+-{
+- cr4_set_bits(X86_CR4_TSD);
+-}
+-
+ void disable_TSC(void)
+ {
+ preempt_disable();
+@@ -147,15 +142,10 @@ void disable_TSC(void)
+ * Must flip the CPU state synchronously with
+ * TIF_NOTSC in the current running context.
+ */
+- hard_disable_TSC();
++ cr4_set_bits(X86_CR4_TSD);
+ preempt_enable();
+ }
+
+-static void hard_enable_TSC(void)
+-{
+- cr4_clear_bits(X86_CR4_TSD);
+-}
+-
+ static void enable_TSC(void)
+ {
+ preempt_disable();
+@@ -164,7 +154,7 @@ static void enable_TSC(void)
+ * Must flip the CPU state synchronously with
+ * TIF_NOTSC in the current running context.
+ */
+- hard_enable_TSC();
++ cr4_clear_bits(X86_CR4_TSD);
+ preempt_enable();
+ }
+
+@@ -238,12 +228,8 @@ void __switch_to_xtra(struct task_struct
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ }
+
+- if ((tifp ^ tifn) & _TIF_NOTSC) {
+- if (tifn & _TIF_NOTSC)
+- hard_disable_TSC();
+- else
+- hard_enable_TSC();
+- }
++ if ((tifp ^ tifn) & _TIF_NOTSC)
++ cr4_toggle_bits(X86_CR4_TSD);
+ }
+
+ /*
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:26:40 +0200
+Subject: x86/speculation: Add prctl for Speculative Store Bypass mitigation
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit a73ec77ee17ec556fe7f165d00314cb7c047b1ac upstream
+
+Add prctl-based control for the Speculative Store Bypass mitigation and make it
+the default mitigation for Intel and AMD.
+
+Andi Kleen provided the following rationale (slightly redacted):
+
+ There are multiple levels of impact of Speculative Store Bypass:
+
+ 1) JITed sandbox.
+ It cannot invoke system calls, but can do PRIME+PROBE and may have call
+ interfaces to other code.
+
+ 2) Native code process.
+ No protection inside the process at this level.
+
+ 3) Kernel.
+
+ 4) Between processes.
+
+ The prctl tries to protect against attacks mounted from case (1).
+
+ If the untrusted code can do random system calls, then control is already
+ lost in a much worse way. So there needs to be system call protection in
+ some way (using a JIT that does not allow them, or seccomp). Or rather, if
+ the process can somehow subvert its environment to do the prctl, it can
+ already execute arbitrary code, which is much worse than SSB.
+
+ To put it differently, the point of the prctl is to not allow JITed code
+ to read data it shouldn't read from its JITed sandbox. If it already has
+ escaped its sandbox then it can already read everything it wants in its
+ address space, and do much worse.
+
+ The ability to control Speculative Store Bypass makes it possible to enable
+ the protection selectively, without affecting overall system performance.
+
+Based on an initial patch from Tim Chen. Completely rewritten.
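+
+A possible user-space use of the interface added here, as a sketch (the
+PR_* values are defined locally in case <linux/prctl.h> is older; the
+snippet is illustrative and not part of this patch's diff):
+
+  #include <stdio.h>
+  #include <sys/prctl.h>
+
+  #ifndef PR_SET_SPECULATION_CTRL
+  # define PR_GET_SPECULATION_CTRL 52
+  # define PR_SET_SPECULATION_CTRL 53
+  # define PR_SPEC_STORE_BYPASS    0
+  # define PR_SPEC_PRCTL           (1UL << 0)
+  # define PR_SPEC_ENABLE          (1UL << 1)
+  # define PR_SPEC_DISABLE         (1UL << 2)
+  #endif
+
+  int main(void)
+  {
+      /* Request the mitigation, i.e. disable the speculation misfeature. */
+      if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+                PR_SPEC_DISABLE, 0, 0))
+          perror("PR_SET_SPECULATION_CTRL");
+
+      /* Read it back; PR_SPEC_PRCTL | PR_SPEC_DISABLE is expected. */
+      printf("state: %#x\n",
+             (unsigned)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+                             0, 0, 0));
+      return 0;
+  }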
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/kernel-parameters.txt | 6 ++
+ arch/x86/include/asm/nospec-branch.h | 1
+ arch/x86/kernel/cpu/bugs.c | 83 ++++++++++++++++++++++++++++++-----
+ 3 files changed, 79 insertions(+), 11 deletions(-)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -4001,7 +4001,11 @@ bytes respectively. Such letter suffixes
+ off - Unconditionally enable Speculative Store Bypass
+ auto - Kernel detects whether the CPU model contains an
+ implementation of Speculative Store Bypass and
+- picks the most appropriate mitigation
++ picks the most appropriate mitigation.
++ prctl - Control Speculative Store Bypass per thread
++ via prctl. Speculative Store Bypass is enabled
++ for a process by default. The state of the control
++ is inherited on fork.
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -232,6 +232,7 @@ extern u64 x86_spec_ctrl_get_default(voi
+ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
++ SPEC_STORE_BYPASS_PRCTL,
+ };
+
+ extern char __indirect_thunk_start[];
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -11,6 +11,8 @@
+ #include <linux/utsname.h>
+ #include <linux/cpu.h>
+ #include <linux/module.h>
++#include <linux/nospec.h>
++#include <linux/prctl.h>
+
+ #include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+@@ -411,20 +413,23 @@ enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_NONE,
+ SPEC_STORE_BYPASS_CMD_AUTO,
+ SPEC_STORE_BYPASS_CMD_ON,
++ SPEC_STORE_BYPASS_CMD_PRCTL,
+ };
+
+ static const char *ssb_strings[] = {
+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
+- [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
++ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
++ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
+ };
+
+ static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+ } ssb_mitigation_options[] = {
+- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
+ };
+
+ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+@@ -474,14 +479,15 @@ static enum ssb_mitigation_cmd __init __
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
+- /*
+- * AMD platforms by default don't need SSB mitigation.
+- */
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+- break;
++ /* Choose prctl as the default mode */
++ mode = SPEC_STORE_BYPASS_PRCTL;
++ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+ break;
++ case SPEC_STORE_BYPASS_CMD_PRCTL:
++ mode = SPEC_STORE_BYPASS_PRCTL;
++ break;
+ case SPEC_STORE_BYPASS_CMD_NONE:
+ break;
+ }
+@@ -492,7 +498,7 @@ static enum ssb_mitigation_cmd __init __
+ * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+ */
+- if (mode != SPEC_STORE_BYPASS_NONE) {
++ if (mode == SPEC_STORE_BYPASS_DISABLE) {
+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+ /*
+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+@@ -523,6 +529,63 @@ static void ssb_select_mitigation()
+
+ #undef pr_fmt
+
++static int ssb_prctl_set(unsigned long ctrl)
++{
++ bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
++
++ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
++ return -ENXIO;
++
++ if (ctrl == PR_SPEC_ENABLE)
++ clear_tsk_thread_flag(current, TIF_RDS);
++ else
++ set_tsk_thread_flag(current, TIF_RDS);
++
++ if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
++ speculative_store_bypass_update();
++
++ return 0;
++}
++
++static int ssb_prctl_get(void)
++{
++ switch (ssb_mode) {
++ case SPEC_STORE_BYPASS_DISABLE:
++ return PR_SPEC_DISABLE;
++ case SPEC_STORE_BYPASS_PRCTL:
++ if (test_tsk_thread_flag(current, TIF_RDS))
++ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
++ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
++ default:
++ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
++ return PR_SPEC_ENABLE;
++ return PR_SPEC_NOT_AFFECTED;
++ }
++}
++
++int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++{
++ if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
++ return -ERANGE;
++
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssb_prctl_set(ctrl);
++ default:
++ return -ENODEV;
++ }
++}
++
++int arch_prctl_spec_ctrl_get(unsigned long which)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssb_prctl_get();
++ default:
++ return -ENODEV;
++ }
++}
++
+ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBRS))
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 17 May 2018 17:09:18 +0200
+Subject: x86/speculation: Add virtualized speculative store bypass disable support
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 11fb0683493b2da112cd64c9dada221b52463bf7 upstream
+
+Some AMD processors only support a non-architectural means of enabling
+speculative store bypass disable (SSBD). To present a simplified view of
+this to a guest, an architectural definition has been created through a new
+CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f. With this, a
+hypervisor can virtualize the existence of this definition and provide the
+guest with an architectural method for using SSBD.
+
+Add the new CPUID feature, the new MSR and update the existing SSBD
+support to use this MSR when present.
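+
+A user-space sketch that merely probes for the new CPUID bit (detection
+only; programming MSR 0xc001011f remains a hypervisor/kernel job, and the
+snippet is illustrative, not part of the patch):
+
+  #include <stdio.h>
+  #include <cpuid.h>
+
+  int main(void)
+  {
+      unsigned int eax, ebx, ecx, edx;
+
+      if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
+          fprintf(stderr, "CPUID leaf 0x80000008 not available\n");
+          return 1;
+      }
+
+      printf("VIRT_SSBD (EBX bit 25): %s\n",
+             (ebx & (1u << 25)) ? "present" : "absent");
+      return 0;
+  }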
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/msr-index.h | 2 ++
+ arch/x86/kernel/cpu/bugs.c | 4 +++-
+ arch/x86/kernel/process.c | 13 ++++++++++++-
+ 4 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -274,6 +274,7 @@
+ #define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -323,6 +323,8 @@
+ #define MSR_AMD64_IBSOPDATA4 0xc001103d
+ #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+
++#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
++
+ /* Fam 17h MSRs */
+ #define MSR_F17H_IRPERF 0xc00000e9
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -204,7 +204,9 @@ static void x86_amd_ssb_disable(void)
+ {
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
++ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
++ else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -312,6 +312,15 @@ static __always_inline void amd_set_core
+ }
+ #endif
+
++static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
++{
++ /*
++ * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
++ * so ssbd_tif_to_spec_ctrl() just works.
++ */
++ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
++}
++
+ static __always_inline void intel_set_ssb_state(unsigned long tifn)
+ {
+ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+@@ -321,7 +330,9 @@ static __always_inline void intel_set_ss
+
+ static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+ {
+- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
++ amd_set_ssb_virt_state(tifn);
++ else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ amd_set_core_ssb_state(tifn);
+ else
+ intel_set_ssb_state(tifn);
--- /dev/null
+From foo@baz Mon May 21 22:23:32 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:01:37 +0200
+Subject: x86/speculation: Create spec-ctrl.h to avoid include hell
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 28a2775217b17208811fa43a9e96bd1fdf417b86 upstream
+
+Having everything in nospec-branch.h creates a hell of dependencies when
+adding the prctl-based switching mechanism. Move everything which is not
+required in nospec-branch.h to spec-ctrl.h and fix up the includes in the
+relevant files.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 14 --------------
+ arch/x86/include/asm/spec-ctrl.h | 21 +++++++++++++++++++++
+ arch/x86/kernel/cpu/amd.c | 2 +-
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ arch/x86/kvm/svm.c | 2 +-
+ arch/x86/kvm/vmx.c | 2 +-
+ 6 files changed, 25 insertions(+), 18 deletions(-)
+ create mode 100644 arch/x86/include/asm/spec-ctrl.h
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -228,26 +228,12 @@ enum spectre_v2_mitigation {
+ extern void x86_spec_ctrl_set(u64);
+ extern u64 x86_spec_ctrl_get_default(void);
+
+-/*
+- * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+- * the guest has, while on VMEXIT we restore the host view. This
+- * would be easier if SPEC_CTRL were architecturally maskable or
+- * shadowable for guests but this is not (currently) the case.
+- * Takes the guest view of SPEC_CTRL MSR as a parameter.
+- */
+-extern void x86_spec_ctrl_set_guest(u64);
+-extern void x86_spec_ctrl_restore_host(u64);
+-
+ /* The Speculative Store Bypass disable variants */
+ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
+ };
+
+-/* AMD specific Speculative Store Bypass MSR data */
+-extern u64 x86_amd_ls_cfg_base;
+-extern u64 x86_amd_ls_cfg_rds_mask;
+-
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+--- /dev/null
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_X86_SPECCTRL_H_
++#define _ASM_X86_SPECCTRL_H_
++
++#include <asm/nospec-branch.h>
++
++/*
++ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
++ * the guest has, while on VMEXIT we restore the host view. This
++ * would be easier if SPEC_CTRL were architecturally maskable or
++ * shadowable for guests but this is not (currently) the case.
++ * Takes the guest view of SPEC_CTRL MSR as a parameter.
++ */
++extern void x86_spec_ctrl_set_guest(u64);
++extern void x86_spec_ctrl_restore_host(u64);
++
++/* AMD specific Speculative Store Bypass MSR data */
++extern u64 x86_amd_ls_cfg_base;
++extern u64 x86_amd_ls_cfg_rds_mask;
++
++#endif
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -9,7 +9,7 @@
+ #include <asm/processor.h>
+ #include <asm/apic.h>
+ #include <asm/cpu.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+ #include <asm/smp.h>
+ #include <asm/pci-direct.h>
+ #include <asm/delay.h>
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -12,7 +12,7 @@
+ #include <linux/cpu.h>
+ #include <linux/module.h>
+
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+ #include <asm/bugs.h>
+ #include <asm/processor.h>
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -45,7 +45,7 @@
+ #include <asm/kvm_para.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/microcode.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+
+ #include <asm/virtext.h>
+ #include "trace.h"
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -50,7 +50,7 @@
+ #include <asm/apic.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/microcode.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+
+ #include "trace.h"
+ #include "pmu.h"
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 9 May 2018 21:53:09 +0200
+Subject: x86/speculation: Handle HT correctly on AMD
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 1f50ddb4f4189243c05926b842dc1a0332195f31 upstream
+
+The AMD64_LS_CFG MSR is a per-core MSR on Family 17H CPUs. That means when
+hyperthreading is enabled the SSBD bit toggle needs to take both siblings of
+a core into account. Otherwise the following situation can happen:
+
+CPU0 CPU1
+
+disable SSB
+ disable SSB
+ enable SSB <- Enables it for the Core, i.e. for CPU0 as well
+
+So after the SSB enable on CPU1 the task on CPU0 runs with SSB enabled
+again.
+
+On Intel the SSBD control is per core as well, but the synchronization
+logic is implemented behind the per-thread SPEC_CTRL MSR. It works like
+this:
+
+ CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+
+i.e. if one of the threads enables a mitigation then this affects both and
+the mitigation is only disabled in the core when both threads disabled it.
+
+Add the necessary synchronization logic for AMD family 17H. Unfortunately
+that requires a spinlock to serialize the access to the MSR, but the locks
+are only shared between siblings.
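+
+A single-threaded model of the sibling bookkeeping (a plain counter stands
+in for the shared per-core state and a printf for the MSR write; the real
+code serializes this with the raw spinlock shared between the siblings):
+
+  #include <stdio.h>
+  #include <stdbool.h>
+
+  static unsigned int disable_state;  /* how many siblings want SSBD */
+
+  static void set_core_ssbd(bool want_ssbd)
+  {
+      if (want_ssbd) {
+          /* First sibling to request SSBD programs the core MSR. */
+          if (!disable_state)
+              printf("wrmsrl(LS_CFG, base | ssbd_mask)\n");
+          disable_state++;
+      } else {
+          disable_state--;
+          /* Last sibling to drop SSBD clears it for the whole core. */
+          if (!disable_state)
+              printf("wrmsrl(LS_CFG, base)\n");
+      }
+  }
+
+  int main(void)
+  {
+      set_core_ssbd(true);   /* sibling 0 enables -> MSR written        */
+      set_core_ssbd(true);   /* sibling 1 enables -> already on         */
+      set_core_ssbd(false);  /* sibling 1 disables -> still on for core */
+      set_core_ssbd(false);  /* sibling 0 disables -> MSR cleared       */
+      return 0;
+  }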
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 6 +
+ arch/x86/kernel/process.c | 125 +++++++++++++++++++++++++++++++++++++--
+ arch/x86/kernel/smpboot.c | 5 +
+ 3 files changed, 130 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -33,6 +33,12 @@ static inline u64 ssbd_tif_to_amd_ls_cfg
+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+ }
+
++#ifdef CONFIG_SMP
++extern void speculative_store_bypass_ht_init(void);
++#else
++static inline void speculative_store_bypass_ht_init(void) { }
++#endif
++
+ extern void speculative_store_bypass_update(void);
+
+ #endif
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -203,22 +203,135 @@ static inline void switch_to_bitmap(stru
+ }
+ }
+
+-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
++#ifdef CONFIG_SMP
++
++struct ssb_state {
++ struct ssb_state *shared_state;
++ raw_spinlock_t lock;
++ unsigned int disable_state;
++ unsigned long local_state;
++};
++
++#define LSTATE_SSB 0
++
++static DEFINE_PER_CPU(struct ssb_state, ssb_state);
++
++void speculative_store_bypass_ht_init(void)
++{
++ struct ssb_state *st = this_cpu_ptr(&ssb_state);
++ unsigned int this_cpu = smp_processor_id();
++ unsigned int cpu;
++
++ st->local_state = 0;
++
++ /*
++ * Shared state setup happens once on the first bringup
++ * of the CPU. It's not destroyed on CPU hotunplug.
++ */
++ if (st->shared_state)
++ return;
++
++ raw_spin_lock_init(&st->lock);
++
++ /*
++ * Go over HT siblings and check whether one of them has set up the
++ * shared state pointer already.
++ */
++ for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
++ if (cpu == this_cpu)
++ continue;
++
++ if (!per_cpu(ssb_state, cpu).shared_state)
++ continue;
++
++ /* Link it to the state of the sibling: */
++ st->shared_state = per_cpu(ssb_state, cpu).shared_state;
++ return;
++ }
++
++ /*
++ * First HT sibling to come up on the core. Link shared state of
++ * the first HT sibling to itself. The siblings on the same core
++ * which come up later will see the shared state pointer and link
++ * themself to the state of this CPU.
++ */
++ st->shared_state = st;
++}
++
++/*
++ * Logic is: First HT sibling enables SSBD for both siblings in the core
++ * and last sibling to disable it, disables it for the whole core. This how
++ * MSR_SPEC_CTRL works in "hardware":
++ *
++ * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
++ */
++static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+ {
+- u64 msr;
++ struct ssb_state *st = this_cpu_ptr(&ssb_state);
++ u64 msr = x86_amd_ls_cfg_base;
+
+- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+- msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
++ if (!static_cpu_has(X86_FEATURE_ZEN)) {
++ msr |= ssbd_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
++ return;
++ }
++
++ if (tifn & _TIF_SSBD) {
++ /*
++ * Since this can race with prctl(), block reentry on the
++ * same CPU.
++ */
++ if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
++ return;
++
++ msr |= x86_amd_ls_cfg_ssbd_mask;
++
++ raw_spin_lock(&st->shared_state->lock);
++ /* First sibling enables SSBD: */
++ if (!st->shared_state->disable_state)
++ wrmsrl(MSR_AMD64_LS_CFG, msr);
++ st->shared_state->disable_state++;
++ raw_spin_unlock(&st->shared_state->lock);
+ } else {
+- msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++ if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
++ return;
++
++ raw_spin_lock(&st->shared_state->lock);
++ st->shared_state->disable_state--;
++ if (!st->shared_state->disable_state)
++ wrmsrl(MSR_AMD64_LS_CFG, msr);
++ raw_spin_unlock(&st->shared_state->lock);
+ }
+ }
++#else
++static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
++{
++ u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
++
++ wrmsrl(MSR_AMD64_LS_CFG, msr);
++}
++#endif
++
++static __always_inline void intel_set_ssb_state(unsigned long tifn)
++{
++ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
++
++ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++}
++
++static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
++{
++ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ amd_set_core_ssb_state(tifn);
++ else
++ intel_set_ssb_state(tifn);
++}
+
+ void speculative_store_bypass_update(void)
+ {
++ preempt_disable();
+ __speculative_store_bypass_update(current_thread_info()->flags);
++ preempt_enable();
+ }
+
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -75,6 +75,7 @@
+ #include <asm/i8259.h>
+ #include <asm/realmode.h>
+ #include <asm/misc.h>
++#include <asm/spec-ctrl.h>
+
+ /* Number of siblings per CPU package */
+ int smp_num_siblings = 1;
+@@ -229,6 +230,8 @@ static void notrace start_secondary(void
+ */
+ check_tsc_sync_target();
+
++ speculative_store_bypass_ht_init();
++
+ /*
+ * Lock vector_lock and initialize the vectors on this cpu
+ * before setting the cpu online. We must set it online with
+@@ -1325,6 +1328,8 @@ void __init native_smp_prepare_cpus(unsi
+ set_mtrr_aps_delayed_init();
+
+ smp_quirk_init_udelay();
++
++ speculative_store_bypass_ht_init();
+ }
+
+ void arch_enable_nonboot_cpus_begin(void)
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 20:42:48 +0200
+Subject: x86/speculation, KVM: Implement support for VIRT_SPEC_CTRL/LS_CFG
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 47c61b3955cf712cadfc25635bf9bc174af030ea upstream
+
+Add the necessary logic for supporting the emulated VIRT_SPEC_CTRL MSR to
+x86_virt_spec_ctrl(). If either X86_FEATURE_LS_CFG_SSBD or
+X86_FEATURE_VIRT_SPEC_CTRL is set then use the new guest_virt_spec_ctrl
+argument to check whether the state must be modified on the host. The
+update is routed through speculative_store_bypass_update() so the ZEN-specific
+sibling coordination is reused.
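+
+A reduced standalone sketch of that reconciliation (plain integers and
+stand-in names; illustrative only, not the patch itself):
+
+  #include <stdio.h>
+  #include <stdbool.h>
+  #include <stdint.h>
+
+  #define SPEC_CTRL_SSBD (1ULL << 2)
+
+  static void virt_spec_ctrl_model(uint64_t guest_virt_spec_ctrl,
+                                   bool host_wants_ssbd, bool setguest)
+  {
+      uint64_t hostval, guestval;
+
+      hostval  = host_wants_ssbd ? SPEC_CTRL_SSBD : 0;    /* host's view */
+      guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;   /* sanitized   */
+
+      /* Only touch the hardware state when host and guest disagree. */
+      if (hostval != guestval)
+          printf("update SSBD to the %s value\n",
+                 setguest ? "guest" : "host");
+      else
+          printf("no change needed\n");
+  }
+
+  int main(void)
+  {
+      virt_spec_ctrl_model(SPEC_CTRL_SSBD, false, true);   /* VMENTER */
+      virt_spec_ctrl_model(SPEC_CTRL_SSBD, false, false);  /* VMEXIT  */
+      return 0;
+  }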
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 6 ++++++
+ arch/x86/kernel/cpu/bugs.c | 30 ++++++++++++++++++++++++++++++
+ 2 files changed, 36 insertions(+)
+
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -53,6 +53,12 @@ static inline u64 ssbd_tif_to_spec_ctrl(
+ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+ }
+
++static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
++{
++ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
++ return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
++}
++
+ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+ {
+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -161,6 +161,36 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
+ wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+ }
+ }
++
++ /*
++ * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
++ * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
++ */
++ if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
++ !static_cpu_has(X86_FEATURE_VIRT_SSBD))
++ return;
++
++ /*
++ * If the host has SSBD mitigation enabled, force it in the host's
++ * virtual MSR value. If its not permanently enabled, evaluate
++ * current's TIF_SSBD thread flag.
++ */
++ if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
++ hostval = SPEC_CTRL_SSBD;
++ else
++ hostval = ssbd_tif_to_spec_ctrl(ti->flags);
++
++ /* Sanitize the guest value */
++ guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
++
++ if (hostval != guestval) {
++ unsigned long tif;
++
++ tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
++ ssbd_spec_ctrl_to_tif(hostval);
++
++ speculative_store_bypass_update(tif);
++ }
+ }
+ EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 3 May 2018 14:37:54 -0700
+Subject: x86/speculation: Make "seccomp" the default mode for Speculative Store Bypass
+
+From: Kees Cook <keescook@chromium.org>
+
+commit f21b53b20c754021935ea43364dbf53778eeba32 upstream
+
+Unless explicitly opted out of, anything running under seccomp will have
+SSB mitigations enabled. Choosing the "prctl" mode will disable this.
+
+[ tglx: Adjusted it to the new arch_seccomp_spec_mitigate() mechanism ]
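+
+A small user-space check of the resulting state, assuming the sysfs file
+and prctl values this series exposes (illustrative only, not part of the
+patch):
+
+  #include <stdio.h>
+  #include <sys/prctl.h>
+
+  #ifndef PR_GET_SPECULATION_CTRL
+  # define PR_GET_SPECULATION_CTRL 52
+  # define PR_SPEC_STORE_BYPASS    0
+  #endif
+
+  int main(void)
+  {
+      char line[128] = "unknown\n";
+      FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/"
+                      "spec_store_bypass", "r");
+
+      if (f) {
+          if (!fgets(line, sizeof(line), f))
+              snprintf(line, sizeof(line), "unreadable\n");
+          fclose(f);
+      }
+      printf("system:     %s", line);
+
+      /* Bit 0 (PR_SPEC_PRCTL) set means the per-task control is active. */
+      printf("task state: %#x\n",
+             (unsigned)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+                             0, 0, 0));
+      return 0;
+  }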
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/kernel-parameters.txt | 26 +++++++++++++++++---------
+ arch/x86/include/asm/nospec-branch.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 32 +++++++++++++++++++++++---------
+ 3 files changed, 41 insertions(+), 18 deletions(-)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -3997,19 +3997,27 @@ bytes respectively. Such letter suffixes
+ This parameter controls whether the Speculative Store
+ Bypass optimization is used.
+
+- on - Unconditionally disable Speculative Store Bypass
+- off - Unconditionally enable Speculative Store Bypass
+- auto - Kernel detects whether the CPU model contains an
+- implementation of Speculative Store Bypass and
+- picks the most appropriate mitigation.
+- prctl - Control Speculative Store Bypass per thread
+- via prctl. Speculative Store Bypass is enabled
+- for a process by default. The state of the control
+- is inherited on fork.
++ on - Unconditionally disable Speculative Store Bypass
++ off - Unconditionally enable Speculative Store Bypass
++ auto - Kernel detects whether the CPU model contains an
++ implementation of Speculative Store Bypass and
++ picks the most appropriate mitigation. If the
++ CPU is not vulnerable, "off" is selected. If the
++ CPU is vulnerable the default mitigation is
++ architecture and Kconfig dependent. See below.
++ prctl - Control Speculative Store Bypass per thread
++ via prctl. Speculative Store Bypass is enabled
++ for a process by default. The state of the control
++ is inherited on fork.
++ seccomp - Same as "prctl" above, but all seccomp threads
++ will disable SSB unless they explicitly opt out.
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
+
++ Default mitigations:
++ X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
++
+ spia_io_base= [HW,MTD]
+ spia_fio_base=
+ spia_pedr=
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -233,6 +233,7 @@ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
+ SPEC_STORE_BYPASS_PRCTL,
++ SPEC_STORE_BYPASS_SECCOMP,
+ };
+
+ extern char __indirect_thunk_start[];
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -415,22 +415,25 @@ enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_AUTO,
+ SPEC_STORE_BYPASS_CMD_ON,
+ SPEC_STORE_BYPASS_CMD_PRCTL,
++ SPEC_STORE_BYPASS_CMD_SECCOMP,
+ };
+
+ static const char *ssb_strings[] = {
+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
+- [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
++ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
++ [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
+ };
+
+ static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+ } ssb_mitigation_options[] = {
+- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+- { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
++ { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
+ };
+
+ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+@@ -480,8 +483,15 @@ static enum ssb_mitigation_cmd __init __
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
+- /* Choose prctl as the default mode */
+- mode = SPEC_STORE_BYPASS_PRCTL;
++ case SPEC_STORE_BYPASS_CMD_SECCOMP:
++ /*
++ * Choose prctl+seccomp as the default mode if seccomp is
++ * enabled.
++ */
++ if (IS_ENABLED(CONFIG_SECCOMP))
++ mode = SPEC_STORE_BYPASS_SECCOMP;
++ else
++ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+@@ -529,12 +539,14 @@ static void ssb_select_mitigation()
+ }
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "Speculation prctl: " fmt
+
+ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+ bool update;
+
+- if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
++ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
++ ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
+ return -ENXIO;
+
+ switch (ctrl) {
+@@ -582,7 +594,8 @@ int arch_prctl_spec_ctrl_set(struct task
+ #ifdef CONFIG_SECCOMP
+ void arch_seccomp_spec_mitigate(struct task_struct *task)
+ {
+- ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
++ if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
++ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ }
+ #endif
+
+@@ -591,6 +604,7 @@ static int ssb_prctl_get(struct task_str
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
++ case SPEC_STORE_BYPASS_SECCOMP:
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (task_spec_ssb_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 May 2018 20:31:44 +0200
+Subject: x86/speculation: Rework speculative_store_bypass_update()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 0270be3e34efb05a88bc4c422572ece038ef3608 upstream
+
+The upcoming support for the virtual SPEC_CTRL MSR on AMD needs to reuse
+speculative_store_bypass_update() to avoid code duplication. Add an
+argument for supplying a thread info (TIF) value and create a wrapper
+speculative_store_bypass_update_current() which is used at the existing
+call site.
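+
+A minimal illustration of the refactoring pattern (shortened names, plain
+user-space C; not the kernel code):
+
+  #include <stdio.h>
+
+  static unsigned long current_flags = 0x1;  /* stand-in for current's TIF word */
+
+  static void ssb_update(unsigned long tif)  /* new: caller supplies TIF */
+  {
+      printf("update SSBD state for tif=%#lx\n", tif);
+  }
+
+  static void ssb_update_current(void)       /* wrapper for old call sites */
+  {
+      ssb_update(current_flags);
+  }
+
+  int main(void)
+  {
+      ssb_update_current();  /* prctl path: current task's flags    */
+      ssb_update(0x0);       /* KVM path: caller-provided TIF value */
+      return 0;
+  }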
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 7 ++++++-
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ arch/x86/kernel/process.c | 4 ++--
+ 3 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -42,6 +42,11 @@ extern void speculative_store_bypass_ht_
+ static inline void speculative_store_bypass_ht_init(void) { }
+ #endif
+
+-extern void speculative_store_bypass_update(void);
++extern void speculative_store_bypass_update(unsigned long tif);
++
++static inline void speculative_store_bypass_update_current(void)
++{
++ speculative_store_bypass_update(current_thread_info()->flags);
++}
+
+ #endif
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -597,7 +597,7 @@ static int ssb_prctl_set(struct task_str
+ * mitigation until it is next scheduled.
+ */
+ if (task == current && update)
+- speculative_store_bypass_update();
++ speculative_store_bypass_update_current();
+
+ return 0;
+ }
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -338,10 +338,10 @@ static __always_inline void __speculativ
+ intel_set_ssb_state(tifn);
+ }
+
+-void speculative_store_bypass_update(void)
++void speculative_store_bypass_update(unsigned long tif)
+ {
+ preempt_disable();
+- __speculative_store_bypass_update(current_thread_info()->flags);
++ __speculative_store_bypass_update(tif);
+ preempt_enable();
+ }
+
--- /dev/null
+From foo@baz Mon May 21 22:23:33 CEST 2018
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 2 May 2018 18:15:14 +0200
+Subject: x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP
+
+From: Borislav Petkov <bp@suse.de>
+
+commit e7c587da125291db39ddf1f49b18e5970adbac17 upstream
+
+Intel and AMD have different CPUID bits for these features, hence use
+synthetic bits which get set for the respective vendor's CPUs in
+init_speculation_control(). That way, debacles like the one described in
+the commit message of
+
+  c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")
+
+don't happen anymore.
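+
+A toy model of that mapping (plain bitmasks stand in for the cpufeature
+words; illustrative only):
+
+  #include <stdio.h>
+
+  #define HAS_SPEC_CTRL (1u << 0)  /* Intel: CPUID.7.0:EDX[26]        */
+  #define HAS_AMD_IBPB  (1u << 1)  /* AMD:   CPUID.0x80000008:EBX[12] */
+  #define HAS_AMD_IBRS  (1u << 2)  /* AMD:   CPUID.0x80000008:EBX[14] */
+
+  #define FEAT_IBRS     (1u << 0)  /* synthetic, vendor-neutral */
+  #define FEAT_IBPB     (1u << 1)
+
+  static unsigned int init_speculation_control_model(unsigned int hw)
+  {
+      unsigned int feat = 0;
+
+      if (hw & HAS_SPEC_CTRL)        /* the Intel bit implies both */
+          feat |= FEAT_IBRS | FEAT_IBPB;
+      if (hw & HAS_AMD_IBRS)
+          feat |= FEAT_IBRS;
+      if (hw & HAS_AMD_IBPB)
+          feat |= FEAT_IBPB;
+
+      return feat;
+  }
+
+  int main(void)
+  {
+      printf("Intel-style CPU: %#x\n",
+             init_speculation_control_model(HAS_SPEC_CTRL));
+      printf("AMD-style CPU:   %#x\n",
+             init_speculation_control_model(HAS_AMD_IBPB | HAS_AMD_IBRS));
+      return 0;
+  }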
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Tested-by: Jörg Otte <jrg.otte@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Link: https://lkml.kernel.org/r/20180504161815.GG9257@pd.tnic
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 12 ++++++++----
+ arch/x86/kernel/cpu/common.c | 14 ++++++++++----
+ arch/x86/kvm/cpuid.c | 10 +++++-----
+ arch/x86/kvm/cpuid.h | 4 ++--
+ 4 files changed, 25 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -205,7 +205,10 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+-#define X86_FEATURE_AMD_SSBD (7*32+24) /* "" AMD SSBD implementation */
++#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
++#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
++#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -263,9 +266,9 @@
+ /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
+ #define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
+ #define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */
+-#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+-#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+-#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
++#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+@@ -301,6 +304,7 @@
+ #define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
+ #define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
+
++
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -725,17 +725,23 @@ static void init_speculation_control(str
+ * and they also have a different bit for STIBP support. Also,
+ * a hypervisor might have set the individual AMD bits even on
+ * Intel CPUs, for finer-grained selection of what's available.
+- *
+- * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
+- * features, which are visible in /proc/cpuinfo and used by the
+- * kernel. So set those accordingly from the Intel bits.
+ */
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+ }
++
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
++
++ if (cpu_has(c, X86_FEATURE_AMD_IBRS))
++ set_cpu_cap(c, X86_FEATURE_IBRS);
++
++ if (cpu_has(c, X86_FEATURE_AMD_IBPB))
++ set_cpu_cap(c, X86_FEATURE_IBPB);
++
++ if (cpu_has(c, X86_FEATURE_AMD_STIBP))
++ set_cpu_cap(c, X86_FEATURE_STIBP);
+ }
+
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -357,7 +357,7 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(IBPB) | F(IBRS);
++ F(AMD_IBPB) | F(AMD_IBRS);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
+@@ -619,10 +619,10 @@ static inline int __do_cpuid_ent(struct
+ entry->eax = g_phys_as | (virt_as << 8);
+ entry->edx = 0;
+ /* IBRS and IBPB aren't necessarily present in hardware cpuid */
+- if (boot_cpu_has(X86_FEATURE_IBPB))
+- entry->ebx |= F(IBPB);
+- if (boot_cpu_has(X86_FEATURE_IBRS))
+- entry->ebx |= F(IBRS);
++ if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
++ entry->ebx |= F(AMD_IBPB);
++ if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
++ entry->ebx |= F(AMD_IBRS);
+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+ break;
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -165,7 +165,7 @@ static inline bool guest_cpuid_has_ibpb(
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+- if (best && (best->ebx & bit(X86_FEATURE_IBPB)))
++ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+@@ -176,7 +176,7 @@ static inline bool guest_cpuid_has_spec_
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+- if (best && (best->ebx & bit(X86_FEATURE_IBRS)))
++ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SSBD)));