--- /dev/null
+Subject: ptrace: Remove unused ptrace_may_access_sched() and MODE_IBRS
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun Nov 25 19:33:50 2018 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 46f7ecb1e7359f183f5bbd1e08b90e10e52164f9 upstream
+
+The IBPB control code in x86 removed the last usage of
+ptrace_may_access_sched() and the PTRACE_MODE_SCHED/PTRACE_MODE_IBPB
+modes. Remove the now-unused functionality which was introduced for it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.559149393@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/ptrace.h | 17 -----------------
+ kernel/ptrace.c | 10 ----------
+ 2 files changed, 27 deletions(-)
+
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_stru
+ #define PTRACE_MODE_NOAUDIT 0x04
+ #define PTRACE_MODE_FSCREDS 0x08
+ #define PTRACE_MODE_REALCREDS 0x10
+-#define PTRACE_MODE_SCHED 0x20
+-#define PTRACE_MODE_IBPB 0x40
+
+ /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
+ #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
+ #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
+ #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
+ #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+-#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
+
+ /**
+ * ptrace_may_access - check whether the caller is permitted to access
+@@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_stru
+ */
+ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
+
+-/**
+- * ptrace_may_access - check whether the caller is permitted to access
+- * a target task.
+- * @task: target task
+- * @mode: selects type of access and caller credentials
+- *
+- * Returns true on success, false on denial.
+- *
+- * Similar to ptrace_may_access(). Only to be called from context switch
+- * code. Does not call into audit and the regular LSM hooks due to locking
+- * constraints.
+- */
+-extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
+-
+ static inline int ptrace_reparented(struct task_struct *child)
+ {
+ return !same_thread_group(child->real_parent, child->parent);
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -261,9 +261,6 @@ static int ptrace_check_attach(struct ta
+
+ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+ {
+- if (mode & PTRACE_MODE_SCHED)
+- return false;
+-
+ if (mode & PTRACE_MODE_NOAUDIT)
+ return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
+ else
+@@ -331,16 +328,9 @@ ok:
+ !ptrace_has_cap(mm->user_ns, mode)))
+ return -EPERM;
+
+- if (mode & PTRACE_MODE_SCHED)
+- return 0;
+ return security_ptrace_access_check(task, mode);
+ }
+
+-bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
+-{
+- return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
+-}
+-
+ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
+ {
+ int err;
--- /dev/null
+Subject: sched/core: Fix cpu.max vs. cpuhotplug deadlock
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon Jan 22 22:53:28 2018 +0100
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit ce48c146495a1a50e48cdbfbfaba3e708be7c07c upstream
+
+Tejun reported the following cpu-hotplug lock (percpu-rwsem) read recursion:
+
+ tg_set_cfs_bandwidth()
+ get_online_cpus()
+ cpus_read_lock()
+
+ cfs_bandwidth_usage_inc()
+ static_key_slow_inc()
+ cpus_read_lock()
+
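+The percpu-rwsem read side is not recursion safe once a writer is
+waiting, so the nested cpus_read_lock() above can deadlock. A minimal
+sketch of the fixed call pattern, using the *_cpuslocked() helpers
+introduced below:
+
+  tg_set_cfs_bandwidth()
+    get_online_cpus()
+      cpus_read_lock()
+
+    cfs_bandwidth_usage_inc()
+      static_key_slow_inc_cpuslocked()  <-- no nested cpus_read_lock()
+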
+Reported-by: Tejun Heo <tj@kernel.org>
+Tested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180122215328.GP3397@worktop
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/jump_label.h | 7 +++++++
+ kernel/jump_label.c | 12 +++++++++---
+ kernel/sched/fair.c | 4 ++--
+ 3 files changed, 18 insertions(+), 5 deletions(-)
+
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -160,6 +160,8 @@ extern void arch_jump_label_transform_st
+ extern int jump_label_text_reserved(void *start, void *end);
+ extern void static_key_slow_inc(struct static_key *key);
+ extern void static_key_slow_dec(struct static_key *key);
++extern void static_key_slow_inc_cpuslocked(struct static_key *key);
++extern void static_key_slow_dec_cpuslocked(struct static_key *key);
+ extern void jump_label_apply_nops(struct module *mod);
+ extern int static_key_count(struct static_key *key);
+ extern void static_key_enable(struct static_key *key);
+@@ -222,6 +224,9 @@ static inline void static_key_slow_dec(s
+ atomic_dec(&key->enabled);
+ }
+
++#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
++#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
++
+ static inline int jump_label_text_reserved(void *start, void *end)
+ {
+ return 0;
+@@ -416,6 +421,8 @@ extern bool ____wrong_branch_error(void)
+
+ #define static_branch_inc(x) static_key_slow_inc(&(x)->key)
+ #define static_branch_dec(x) static_key_slow_dec(&(x)->key)
++#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
++#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
+
+ /*
+ * Normal usage; boolean enable/disable.
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -79,7 +79,7 @@ int static_key_count(struct static_key *
+ }
+ EXPORT_SYMBOL_GPL(static_key_count);
+
+-static void static_key_slow_inc_cpuslocked(struct static_key *key)
++void static_key_slow_inc_cpuslocked(struct static_key *key)
+ {
+ int v, v1;
+
+@@ -180,7 +180,7 @@ void static_key_disable(struct static_ke
+ }
+ EXPORT_SYMBOL_GPL(static_key_disable);
+
+-static void static_key_slow_dec_cpuslocked(struct static_key *key,
++static void __static_key_slow_dec_cpuslocked(struct static_key *key,
+ unsigned long rate_limit,
+ struct delayed_work *work)
+ {
+@@ -211,7 +211,7 @@ static void __static_key_slow_dec(struct
+ struct delayed_work *work)
+ {
+ cpus_read_lock();
+- static_key_slow_dec_cpuslocked(key, rate_limit, work);
++ __static_key_slow_dec_cpuslocked(key, rate_limit, work);
+ cpus_read_unlock();
+ }
+
+@@ -229,6 +229,12 @@ void static_key_slow_dec(struct static_k
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_dec);
+
++void static_key_slow_dec_cpuslocked(struct static_key *key)
++{
++ STATIC_KEY_CHECK_USE();
++ __static_key_slow_dec_cpuslocked(key, 0, NULL);
++}
++
+ void static_key_slow_dec_deferred(struct static_key_deferred *key)
+ {
+ STATIC_KEY_CHECK_USE();
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4040,12 +4040,12 @@ static inline bool cfs_bandwidth_used(vo
+
+ void cfs_bandwidth_usage_inc(void)
+ {
+- static_key_slow_inc(&__cfs_bandwidth_used);
++ static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
+ }
+
+ void cfs_bandwidth_usage_dec(void)
+ {
+- static_key_slow_dec(&__cfs_bandwidth_used);
++ static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
+ }
+ #else /* HAVE_JUMP_LABEL */
+ static bool cfs_bandwidth_used(void)
--- /dev/null
+Subject: sched/smt: Expose sched_smt_present static key
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun Nov 25 19:33:38 2018 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 321a874a7ef85655e93b3206d0f36b4a6097f948 upstream
+
+Make the scheduler's 'sched_smt_present' static key globally available, so
+it can be used in the x86 speculation control code.
+
+Provide a query function and a stub for the CONFIG_SMP=n case.
+
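+For illustration, a caller such as the x86 speculation control code can
+then do (sketch; apply_smt_mitigation() is a hypothetical placeholder):
+
+	#include <linux/sched/smt.h>
+
+	if (sched_smt_active())
+		apply_smt_mitigation();	/* SMT siblings may be online */
+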
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.430168326@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/sched/smt.h | 18 ++++++++++++++++++
+ kernel/sched/sched.h | 4 +---
+ 2 files changed, 19 insertions(+), 3 deletions(-)
+
+--- /dev/null
++++ b/include/linux/sched/smt.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_SCHED_SMT_H
++#define _LINUX_SCHED_SMT_H
++
++#include <linux/static_key.h>
++
++#ifdef CONFIG_SCHED_SMT
++extern struct static_key_false sched_smt_present;
++
++static __always_inline bool sched_smt_active(void)
++{
++ return static_branch_likely(&sched_smt_present);
++}
++#else
++static inline bool sched_smt_active(void) { return false; }
++#endif
++
++#endif
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -20,6 +20,7 @@
+ #include <linux/sched/task_stack.h>
+ #include <linux/sched/cputime.h>
+ #include <linux/sched/init.h>
++#include <linux/sched/smt.h>
+
+ #include <linux/u64_stats_sync.h>
+ #include <linux/kernel_stat.h>
+@@ -825,9 +826,6 @@ static inline int cpu_of(struct rq *rq)
+
+
+ #ifdef CONFIG_SCHED_SMT
+-
+-extern struct static_key_false sched_smt_present;
+-
+ extern void __update_idle_core(struct rq *rq);
+
+ static inline void update_idle_core(struct rq *rq)
--- /dev/null
+Subject: sched/smt: Make sched_smt_present track topology
+From: Peter Zijlstra (Intel) <peterz@infradead.org>
+Date: Sun Nov 25 19:33:36 2018 +0100
+
+From: Peter Zijlstra (Intel) <peterz@infradead.org>
+
+commit c5511d03ec090980732e929c318a7a6374b5550e upstream
+
+Currently the 'sched_smt_present' static key is enabled when SMT topology
+is observed at CPU bringup, but it is never disabled. However there is
+demand to also disable the key when the topology changes such that there
+is no SMT present anymore.
+
+Implement this by making the key count the number of cores that have SMT
+enabled.
+
+In particular, the SMT topology bits are set before interrupts are enabled
+and, similarly, are cleared after interrupts are disabled for the last time
+and the CPU dies.
+
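+A sketch of the resulting balance, mirroring the hunks below:
+
+	/* sched_cpu_activate(): the second sibling of a core comes up */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
+
+	/* sched_cpu_deactivate(): the core is about to lose its sibling */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+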
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.246110444@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5617,15 +5617,10 @@ int sched_cpu_activate(unsigned int cpu)
+
+ #ifdef CONFIG_SCHED_SMT
+ /*
+- * The sched_smt_present static key needs to be evaluated on every
+- * hotplug event because at boot time SMT might be disabled when
+- * the number of booted CPUs is limited.
+- *
+- * If then later a sibling gets hotplugged, then the key would stay
+- * off and SMT scheduling would never be functional.
++ * When going up, increment the number of cores with SMT present.
+ */
+- if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
+- static_branch_enable_cpuslocked(&sched_smt_present);
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_inc_cpuslocked(&sched_smt_present);
+ #endif
+ set_cpu_active(cpu, true);
+
+@@ -5669,6 +5664,14 @@ int sched_cpu_deactivate(unsigned int cp
+ */
+ synchronize_rcu_mult(call_rcu, call_rcu_sched);
+
++#ifdef CONFIG_SCHED_SMT
++ /*
++ * When going down, decrement the number of cores with SMT present.
++ */
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_dec_cpuslocked(&sched_smt_present);
++#endif
++
+ if (!sched_smp_initialized)
+ return 0;
+
rapidio-rionet-do-not-free-skb-before-reading-its-length.patch
s390-qeth-fix-length-check-in-snmp-processing.patch
usbnet-ipheth-fix-potential-recvmsg-bug-and-recvmsg-bug-2.patch
+schedcore_Fix_cpu.max_vs._cpuhotplug_deadlock.patch
+x86bugs_Add_AMDs_variant_of_SSB_NO.patch
+x86bugs_Add_AMDs_SPEC_CTRL_MSR_usage.patch
+x86bugs_Switch_the_selection_of_mitigation_from_CPU_vendor_to_CPU_features.patch
+x86bugs_Update_when_to_check_for_the_LS_CFG_SSBD_mitigation.patch
+x86bugs_Fix_the_AMD_SSBD_usage_of_the_SPEC_CTRL_MSR.patch
+x86speculation_Enable_cross-hyperthread_spectre_v2_STIBP_mitigation.patch
+x86speculation_Apply_IBPB_more_strictly_to_avoid_cross-process_data_leak.patch
+x86speculation_Propagate_information_about_RSB_filling_mitigation_to_sysfs.patch
+x86speculation_Add_RETPOLINE_AMD_support_to_the_inline_asm_CALL_NOSPEC_variant.patch
+x86retpoline_Make_CONFIG_RETPOLINE_depend_on_compiler_support.patch
+x86retpoline_Remove_minimal_retpoline_support.patch
+x86speculation_Update_the_TIF_SSBD_comment.patch
+x86speculation_Clean_up_spectre_v2_parse_cmdline().patch
+x86speculation_Remove_unnecessary_ret_variable_in_cpu_show_common().patch
+x86speculation_Move_STIPBIBPB_string_conditionals_out_of_cpu_show_common().patch
+x86speculation_Disable_STIBP_when_enhanced_IBRS_is_in_use.patch
+x86speculation_Rename_SSBD_update_functions.patch
+x86speculation_Reorganize_speculation_control_MSRs_update.patch
+schedsmt_Make_sched_smt_present_track_topology.patch
+x86Kconfig_Select_SCHED_SMT_if_SMP_enabled.patch
+schedsmt_Expose_sched_smt_present_static_key.patch
+x86speculation_Rework_SMT_state_change.patch
+x86l1tf_Show_actual_SMT_state.patch
+x86speculation_Reorder_the_spec_v2_code.patch
+x86speculation_Mark_string_arrays_const_correctly.patch
+x86speculataion_Mark_command_line_parser_data___initdata.patch
+x86speculation_Unify_conditional_spectre_v2_print_functions.patch
+x86speculation_Add_command_line_control_for_indirect_branch_speculation.patch
+x86speculation_Prepare_for_per_task_indirect_branch_speculation_control.patch
+x86process_Consolidate_and_simplify_switch_to_xtra_code.patch
+x86speculation_Avoid___switch_to_xtra_calls.patch
+x86speculation_Prepare_for_conditional_IBPB_in_switch_mm.patch
+ptrace_Remove_unused_ptrace_may_access_sched_and_MODE_IBRS.patch
+x86speculation_Split_out_TIF_update.patch
+x86speculation_Prevent_stale_SPEC_CTRL_msr_content.patch
+x86speculation_Prepare_arch_smt_update_for_PRCTL_mode.patch
+x86speculation_Add_prctl()_control_for_indirect_branch_speculation.patch
+x86speculation_Enable_prctl_mode_for_spectre_v2_user.patch
+x86speculation_Add_seccomp_Spectre_v2_user_space_protection_mode.patch
+x86speculation_Provide_IBPB_always_command_line_options.patch
--- /dev/null
+Subject: x86/Kconfig: Select SCHED_SMT if SMP enabled
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun Nov 25 19:33:37 2018 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit dbe733642e01dd108f71436aaea7b328cb28fd87 upstream
+
+CONFIG_SCHED_SMT is enabled by all distros, so there is no real point in
+keeping it configurable. The runtime overhead in the core scheduler code is
+minimal because the actual SMT scheduling parts are conditional on a static
+key.
+
+This allows the scheduler's SMT state static key to be exposed to the
+speculation control code. Alternatively the scheduler's static key could be
+made always available when CONFIG_SMP is enabled, but that's just adding an
+unused static key to every other architecture for nothing.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.337452245@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -955,13 +955,7 @@ config NR_CPUS
+ approximately eight kilobytes to the kernel image.
+
+ config SCHED_SMT
+- bool "SMT (Hyperthreading) scheduler support"
+- depends on SMP
+- ---help---
+- SMT scheduler support improves the CPU scheduler's decision making
+- when dealing with Intel Pentium 4 chips with HyperThreading at a
+- cost of slightly increased overhead in some places. If unsure say
+- N here.
++ def_bool y if SMP
+
+ config SCHED_MC
+ def_bool y
--- /dev/null
+Subject: x86/bugs: Add AMD's SPEC_CTRL MSR usage
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri Jun 1 10:59:20 2018 -0400
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 6ac2f49edb1ef5446089c7c660017732886d62d6 upstream
+
+The AMD document outlining the SSBD handling
+124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf
+mentions that if CPUID 8000_0008.EBX[24] is set we should be using
+the SPEC_CTRL MSR (0x48) over the VIRT SPEC_CTRL MSR (0xC001_011f)
+for speculative store bypass disable.
+
+In effect this means the X86_FEATURE_VIRT_SSBD flag should be cleared
+so that the SPEC_CTRL MSR is preferred.
+
+See the document titled:
+ 124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf
+
+A copy of this document is available at
+ https://bugzilla.kernel.org/show_bug.cgi?id=199889
+
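+In effect, the SSBD mechanism selection on AMD becomes (sketch of the
+preference order):
+
+	1. SPEC_CTRL MSR (0x48)             - X86_FEATURE_AMD_SSBD
+	2. VIRT_SPEC_CTRL MSR (0xC001_011f) - X86_FEATURE_VIRT_SSBD
+	3. LS_CFG MSR (family dependent)    - X86_FEATURE_LS_CFG_SSBD
+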
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: KarimAllah Ahmed <karahmed@amazon.de>
+Cc: andrew.cooper3@citrix.com
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Kees Cook <keescook@chromium.org>
+Link: https://lkml.kernel.org/r/20180601145921.9500-3-konrad.wilk@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 12 +++++++-----
+ arch/x86/kernel/cpu/common.c | 6 ++++++
+ arch/x86/kvm/cpuid.c | 10 ++++++++--
+ arch/x86/kvm/svm.c | 8 +++++---
+ 5 files changed, 27 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -284,6 +284,7 @@
+ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
+ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -532,18 +532,20 @@ static enum ssb_mitigation __init __ssb_
+ if (mode == SPEC_STORE_BYPASS_DISABLE) {
+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+ /*
+- * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+- * a completely different MSR and bit dependent on family.
++ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
++ * use a completely different MSR and bit dependent on family.
+ */
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
++ case X86_VENDOR_AMD:
++ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
++ x86_amd_ssb_disable();
++ break;
++ }
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ break;
+- case X86_VENDOR_AMD:
+- x86_amd_ssb_disable();
+- break;
+ }
+ }
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -760,6 +760,12 @@ static void init_speculation_control(str
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }
++
++ if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
++ set_cpu_cap(c, X86_FEATURE_SSBD);
++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
++ clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
++ }
+ }
+
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -367,7 +367,8 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD) | F(AMD_SSB_NO);
++ F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
++ F(AMD_SSB_NO);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
+@@ -649,7 +650,12 @@ static inline int __do_cpuid_ent(struct
+ entry->ebx |= F(VIRT_SSBD);
+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++ /*
++ * The preference is to use SPEC CTRL MSR instead of the
++ * VIRT_SPEC MSR.
++ */
++ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
++ !boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ entry->ebx |= F(VIRT_SSBD);
+ break;
+ }
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3644,7 +3644,8 @@ static int svm_get_msr(struct kvm_vcpu *
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
++ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
++ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+ return 1;
+
+ msr_info->data = svm->spec_ctrl;
+@@ -3749,11 +3750,12 @@ static int svm_set_msr(struct kvm_vcpu *
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr->host_initiated &&
+- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
++ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
++ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
++ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+ return 1;
+
+ svm->spec_ctrl = data;
--- /dev/null
+Subject: x86/bugs: Add AMD's variant of SSB_NO
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri Jun 1 10:59:19 2018 -0400
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 24809860012e0130fbafe536709e08a22b3e959e upstream
+
+The AMD document outlining the SSBD handling
+124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf
+mentions that CPUID 8000_0008.EBX[26], when set, means that the
+speculative store bypass disable is no longer needed.
+
+A copy of this document is available at:
+ https://bugzilla.kernel.org/show_bug.cgi?id=199889
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: andrew.cooper3@citrix.com
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180601145921.9500-2-konrad.wilk@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/common.c | 3 ++-
+ arch/x86/kvm/cpuid.c | 2 +-
+ 3 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -285,6 +285,7 @@
+ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
++#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -958,7 +958,8 @@ static void __init cpu_set_bug_bits(stru
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+- !(ia32_cap & ARCH_CAP_SSB_NO))
++ !(ia32_cap & ARCH_CAP_SSB_NO) &&
++ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -367,7 +367,7 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
++ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD) | F(AMD_SSB_NO);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
--- /dev/null
+Subject: x86/bugs: Fix the AMD SSBD usage of the SPEC_CTRL MSR
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon Jul 2 16:36:02 2018 -0500
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 612bc3b3d4be749f73a513a17d9b3ee1330d3487 upstream
+
+On AMD, the presence of the MSR_SPEC_CTRL feature does not imply that the
+SSBD mitigation support should use the SPEC_CTRL MSR. Other features could
+have caused the MSR_SPEC_CTRL feature to be set, while a different SSBD
+mitigation option is in place.
+
+Update the SSBD support to check for the actual SSBD features that will
+use the SPEC_CTRL MSR.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Borislav Petkov <bpetkov@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 6ac2f49edb1e ("x86/bugs: Add AMD's SPEC_CTRL MSR usage")
+Link: http://lkml.kernel.org/r/20180702213602.29202.33151.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -166,7 +166,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
+ guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
+ /* SSBD controlled in MSR_SPEC_CTRL */
+- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++ static_cpu_has(X86_FEATURE_AMD_SSBD))
+ hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+ if (hostval != guestval) {
+@@ -535,9 +536,10 @@ static enum ssb_mitigation __init __ssb_
+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
+ * use a completely different MSR and bit dependent on family.
+ */
+- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
++ if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
++ !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ x86_amd_ssb_disable();
+- else {
++ } else {
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
--- /dev/null
+Subject: x86/bugs: Switch the selection of mitigation from CPU vendor to CPU features
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri Jun 1 10:59:21 2018 -0400
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 108fab4b5c8f12064ef86e02cb0459992affb30f upstream
+
+Both AMD and Intel can use the SPEC_CTRL MSR for SSBD.
+
+However AMD also has two other ways of doing it which do not
+use the SPEC_CTRL MSR.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kvm@vger.kernel.org
+Cc: KarimAllah Ahmed <karahmed@amazon.de>
+Cc: andrew.cooper3@citrix.com
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180601145921.9500-4-konrad.wilk@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -535,17 +535,12 @@ static enum ssb_mitigation __init __ssb_
+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
+ * use a completely different MSR and bit dependent on family.
+ */
+- switch (boot_cpu_data.x86_vendor) {
+- case X86_VENDOR_INTEL:
+- case X86_VENDOR_AMD:
+- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+- x86_amd_ssb_disable();
+- break;
+- }
++ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
++ x86_amd_ssb_disable();
++ else {
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+- break;
+ }
+ }
+
--- /dev/null
+Subject: x86/bugs: Update when to check for the LS_CFG SSBD mitigation
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon Jul 2 16:35:53 2018 -0500
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 845d382bb15c6e7dc5026c0ff919c5b13fc7e11b upstream
+
+If either the X86_FEATURE_AMD_SSBD or X86_FEATURE_VIRT_SSBD features are
+present, then there is no need to perform the check for the LS_CFG SSBD
+mitigation support.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Borislav Petkov <bpetkov@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180702213553.29202.21089.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -554,7 +554,9 @@ static void bsp_init_amd(struct cpuinfo_
+ nodes_per_socket = ((value >> 3) & 7) + 1;
+ }
+
+- if (c->x86 >= 0x15 && c->x86 <= 0x17) {
++ if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
++ !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
++ c->x86 >= 0x15 && c->x86 <= 0x17) {
+ unsigned int bit;
+
+ switch (c->x86) {
--- /dev/null
+Subject: x86/l1tf: Show actual SMT state
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun Nov 25 19:33:40 2018 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 130d6f946f6f2a972ee3ec8540b7243ab99abe97 upstream
+
+Use the now exposed real SMT state, not the SMT sysfs control knob
+state. This reflects the state of the system when the mitigation status is
+queried.
+
+This does not change the warning in the VMX launch code. There the
+dependency on the control knob makes sense because siblings could be
+brought online anytime after launching the VM.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.613357354@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -829,13 +829,14 @@ static ssize_t l1tf_show_state(char *buf
+
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
+ (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
+- cpu_smt_control == CPU_SMT_ENABLED))
++ sched_smt_active())) {
+ return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
+ l1tf_vmx_states[l1tf_vmx_mitigation]);
++ }
+
+ return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
+ l1tf_vmx_states[l1tf_vmx_mitigation],
+- cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
++ sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ #else
+ static ssize_t l1tf_show_state(char *buf)
--- /dev/null
+Subject: x86/process: Consolidate and simplify switch_to_xtra() code
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun Nov 25 19:33:47 2018 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit ff16701a29cba3aafa0bd1656d766813b2d0a811 upstream
+
+Move the conditional invocation of __switch_to_xtra() into an inline
+function so the logic can be shared between 32 and 64 bit.
+
+Stop handing the TSS pointer through the call chain and retrieve it
+directly in the bitmap handling function. Use this_cpu_ptr() instead of the
+per_cpu() indirection.
+
+This is a preparatory change so that the integration of the conditional
+indirect branch speculation optimization happens only in one place.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.280855518@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/switch_to.h | 3 ---
+ arch/x86/kernel/process.c | 12 +++++++-----
+ arch/x86/kernel/process.h | 24 ++++++++++++++++++++++++
+ arch/x86/kernel/process_32.c | 8 +-------
+ arch/x86/kernel/process_64.c | 10 +++-------
+ 5 files changed, 35 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/include/asm/switch_to.h
++++ b/arch/x86/include/asm/switch_to.h
+@@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(stru
+
+ __visible struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next);
+-struct tss_struct;
+-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+- struct tss_struct *tss);
+
+ /* This runs runs on the previous thread's stack. */
+ static inline void prepare_switch_to(struct task_struct *prev,
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -41,6 +41,8 @@
+ #include <asm/prctl.h>
+ #include <asm/spec-ctrl.h>
+
++#include "process.h"
++
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+@@ -255,11 +257,12 @@ void arch_setup_new_exec(void)
+ enable_cpuid();
+ }
+
+-static inline void switch_to_bitmap(struct tss_struct *tss,
+- struct thread_struct *prev,
++static inline void switch_to_bitmap(struct thread_struct *prev,
+ struct thread_struct *next,
+ unsigned long tifp, unsigned long tifn)
+ {
++ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
++
+ if (tifn & _TIF_IO_BITMAP) {
+ /*
+ * Copy the relevant range of the IO bitmap.
+@@ -451,8 +454,7 @@ void speculation_ctrl_update(unsigned lo
+ preempt_enable();
+ }
+
+-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+- struct tss_struct *tss)
++void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
+ {
+ struct thread_struct *prev, *next;
+ unsigned long tifp, tifn;
+@@ -462,7 +464,7 @@ void __switch_to_xtra(struct task_struct
+
+ tifn = READ_ONCE(task_thread_info(next_p)->flags);
+ tifp = READ_ONCE(task_thread_info(prev_p)->flags);
+- switch_to_bitmap(tss, prev, next, tifp, tifn);
++ switch_to_bitmap(prev, next, tifp, tifn);
+
+ propagate_user_return_notify(prev_p, next_p);
+
+--- /dev/null
++++ b/arch/x86/kernel/process.h
+@@ -0,0 +1,24 @@
++// SPDX-License-Identifier: GPL-2.0
++//
++// Code shared between 32 and 64 bit
++
++void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
++
++/*
++ * This needs to be inline to optimize for the common case where no extra
++ * work needs to be done.
++ */
++static inline void switch_to_extra(struct task_struct *prev,
++ struct task_struct *next)
++{
++ unsigned long next_tif = task_thread_info(next)->flags;
++ unsigned long prev_tif = task_thread_info(prev)->flags;
++
++ /*
++ * __switch_to_xtra() handles debug registers, i/o bitmaps,
++ * speculation mitigations etc.
++ */
++ if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
++ prev_tif & _TIF_WORK_CTXSW_PREV))
++ __switch_to_xtra(prev, next);
++}
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -234,7 +234,6 @@ __switch_to(struct task_struct *prev_p,
+ struct fpu *prev_fpu = &prev->fpu;
+ struct fpu *next_fpu = &next->fpu;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+@@ -266,12 +265,7 @@ __switch_to(struct task_struct *prev_p,
+ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
+
+- /*
+- * Now maybe handle debug registers and/or IO bitmaps
+- */
+- if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
+- task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+- __switch_to_xtra(prev_p, next_p, tss);
++ switch_to_extra(prev_p, next_p);
+
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -59,6 +59,8 @@
+ #include <asm/unistd_32_ia32.h>
+ #endif
+
++#include "process.h"
++
+ __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
+
+ /* Prints also some state that isn't saved in the pt_regs */
+@@ -400,7 +402,6 @@ __switch_to(struct task_struct *prev_p,
+ struct fpu *prev_fpu = &prev->fpu;
+ struct fpu *next_fpu = &next->fpu;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
+ this_cpu_read(irq_count) != -1);
+@@ -467,12 +468,7 @@ __switch_to(struct task_struct *prev_p,
+ /* Reload sp0. */
+ update_sp0(next_p);
+
+- /*
+- * Now maybe reload the debug registers and handle I/O bitmaps
+- */
+- if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
+- task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
+- __switch_to_xtra(prev_p, next_p, tss);
++ __switch_to_xtra(prev_p, next_p);
+
+ #ifdef CONFIG_XEN_PV
+ /*
--- /dev/null
+Subject: x86/retpoline: Make CONFIG_RETPOLINE depend on compiler support
+From: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Date: Fri Nov 2 01:45:41 2018 -0700
+
+From: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+
+commit 4cd24de3a0980bf3100c9dcb08ef65ca7c31af48 upstream
+
+Since retpoline capable compilers are widely available, make
+CONFIG_RETPOLINE hard depend on the compiler capability.
+
+Break the build when CONFIG_RETPOLINE is enabled and the compiler does not
+support it. Emit an error message in that case:
+
+ "arch/x86/Makefile:226: *** You are building kernel with non-retpoline
+ compiler, please update your compiler.. Stop."
+
+[dwmw: Fail the build with non-retpoline compiler]
+
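+Whether a compiler is retpoline capable can be probed the same way the
+build system derives RETPOLINE_CFLAGS (illustrative cc-option style
+check for GCC):
+
+	$ gcc -Werror -mindirect-branch=thunk-extern \
+	      -mindirect-branch-register -c -x c /dev/null -o /dev/null \
+	      && echo retpoline capable
+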
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Michal Marek <michal.lkml@markovi.net>
+Cc: <srinivas.eeda@oracle.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/cca0cb20-f9e2-4094-840b-fb0f8810cd34@default
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig | 4 ----
+ arch/x86/Makefile | 5 +++--
+ arch/x86/include/asm/nospec-branch.h | 10 ++++++----
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ scripts/Makefile.build | 2 --
+ 5 files changed, 10 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -440,10 +440,6 @@ config RETPOLINE
+ branches. Requires a compiler with -mindirect-branch=thunk-extern
+ support for full protection. The kernel may run slower.
+
+- Without compiler support, at least indirect branches in assembler
+- code are eliminated. Since this includes the syscall entry path,
+- it is not entirely pointless.
+-
+ config INTEL_RDT
+ bool "Intel Resource Director Technology support"
+ default n
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -241,9 +241,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwin
+
+ # Avoid indirect branches in kernel to deal with Spectre
+ ifdef CONFIG_RETPOLINE
+-ifneq ($(RETPOLINE_CFLAGS),)
+- KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
++ifeq ($(RETPOLINE_CFLAGS),)
++ $(error You are building kernel with non-retpoline compiler, please update your compiler.)
+ endif
++ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+ endif
+
+ archscripts: scripts_basic
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -162,11 +162,12 @@
+ _ASM_PTR " 999b\n\t" \
+ ".popsection\n\t"
+
+-#if defined(CONFIG_X86_64) && defined(RETPOLINE)
++#ifdef CONFIG_RETPOLINE
++#ifdef CONFIG_X86_64
+
+ /*
+- * Since the inline asm uses the %V modifier which is only in newer GCC,
+- * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
++ * Inline asm uses the %V modifier which is only in newer GCC
++ * which is ensured when CONFIG_RETPOLINE is defined.
+ */
+ # define CALL_NOSPEC \
+ ANNOTATE_NOSPEC_ALTERNATIVE \
+@@ -181,7 +182,7 @@
+ X86_FEATURE_RETPOLINE_AMD)
+ # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
+-#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
++#else /* CONFIG_X86_32 */
+ /*
+ * For i386 we use the original ret-equivalent retpoline, because
+ * otherwise we'll run out of registers. We don't care about CET
+@@ -211,6 +212,7 @@
+ X86_FEATURE_RETPOLINE_AMD)
+
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
++#endif
+ #else /* No retpoline for C / inline asm */
+ # define CALL_NOSPEC "call *%[thunk_target]\n"
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -251,7 +251,7 @@ static void __init spec2_print_if_secure
+
+ static inline bool retp_compiler(void)
+ {
+- return __is_defined(RETPOLINE);
++ return __is_defined(CONFIG_RETPOLINE);
+ }
+
+ static inline bool match_option(const char *arg, int arglen, const char *opt)
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -272,10 +272,8 @@ else
+ objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
+ endif
+ ifdef CONFIG_RETPOLINE
+-ifneq ($(RETPOLINE_CFLAGS),)
+ objtool_args += --retpoline
+ endif
+-endif
+
+
+ ifdef CONFIG_MODVERSIONS
--- /dev/null
+Subject: x86/retpoline: Remove minimal retpoline support
+From: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Date: Fri Nov 2 01:45:41 2018 -0700
+
+From: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+
+commit ef014aae8f1cd2793e4e014bbb102bed53f852b7 upstream
+
+Now that CONFIG_RETPOLINE hard depends on compiler support, there is no
+reason to keep the minimal retpoline support around, which only provided
+basic protection in the assembly files.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: <srinivas.eeda@oracle.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/f06f0a89-5587-45db-8ed2-0a9d6638d5c0@default
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 3 ---
+ arch/x86/kernel/cpu/bugs.c | 13 ++-----------
+ 2 files changed, 2 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -221,11 +221,8 @@
+ /* The Spectre V2 mitigation variants */
+ enum spectre_v2_mitigation {
+ SPECTRE_V2_NONE,
+- SPECTRE_V2_RETPOLINE_MINIMAL,
+- SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
+ SPECTRE_V2_RETPOLINE_GENERIC,
+ SPECTRE_V2_RETPOLINE_AMD,
+- SPECTRE_V2_IBRS,
+ SPECTRE_V2_IBRS_ENHANCED,
+ };
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -134,8 +134,6 @@ enum spectre_v2_mitigation_cmd {
+
+ static const char *spectre_v2_strings[] = {
+ [SPECTRE_V2_NONE] = "Vulnerable",
+- [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
+- [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
+ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
+ [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+ [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
+@@ -249,11 +247,6 @@ static void __init spec2_print_if_secure
+ pr_info("%s selected on command line.\n", reason);
+ }
+
+-static inline bool retp_compiler(void)
+-{
+- return __is_defined(CONFIG_RETPOLINE);
+-}
+-
+ static inline bool match_option(const char *arg, int arglen, const char *opt)
+ {
+ int len = strlen(opt);
+@@ -414,14 +407,12 @@ retpoline_auto:
+ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
+ goto retpoline_generic;
+ }
+- mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+- SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
++ mode = SPECTRE_V2_RETPOLINE_AMD;
+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+ } else {
+ retpoline_generic:
+- mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+- SPECTRE_V2_RETPOLINE_MINIMAL;
++ mode = SPECTRE_V2_RETPOLINE_GENERIC;
+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+ }
+
--- /dev/null
+Subject: x86/speculataion: Mark command line parser data __initdata
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun Nov 25 19:33:43 2018 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 30ba72a990f5096ae08f284de17986461efcc408 upstream
+
+No point in keeping that around.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.893886356@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -247,7 +247,7 @@ static const struct {
+ const char *option;
+ enum spectre_v2_mitigation_cmd cmd;
+ bool secure;
+-} mitigation_options[] = {
++} mitigation_options[] __initdata = {
+ { "off", SPECTRE_V2_CMD_NONE, false },
+ { "on", SPECTRE_V2_CMD_FORCE, true },
+ { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
+@@ -483,7 +483,7 @@ static const char * const ssb_strings[]
+ static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+-} ssb_mitigation_options[] = {
++} ssb_mitigation_options[] __initdata = {
+ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
--- /dev/null
+Subject: x86/speculation: Add RETPOLINE_AMD support to the inline asm CALL_NOSPEC variant
+From: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Date: Tue Sep 18 07:45:00 2018 -0700
+
+From: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+
+commit 0cbb76d6285794f30953bfa3ab831714b59dd700 upstream
+
+...so that they match their asm counterparts.
+
+Add the missing ANNOTATE_NOSPEC_ALTERNATIVE in CALL_NOSPEC, while at it.
+
+Signed-off-by: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Wang YanQing <udknight@gmail.com>
+Cc: dhaval.giani@oracle.com
+Cc: srinivas.eeda@oracle.com
+Link: http://lkml.kernel.org/r/c3975665-173e-4d70-8dee-06c926ac26ee@default
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -170,11 +170,15 @@
+ */
+ # define CALL_NOSPEC \
+ ANNOTATE_NOSPEC_ALTERNATIVE \
+- ALTERNATIVE( \
++ ALTERNATIVE_2( \
+ ANNOTATE_RETPOLINE_SAFE \
+ "call *%[thunk_target]\n", \
+ "call __x86_indirect_thunk_%V[thunk_target]\n", \
+- X86_FEATURE_RETPOLINE)
++ X86_FEATURE_RETPOLINE, \
++ "lfence;\n" \
++ ANNOTATE_RETPOLINE_SAFE \
++ "call *%[thunk_target]\n", \
++ X86_FEATURE_RETPOLINE_AMD)
+ # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
+ #elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+@@ -184,7 +188,8 @@
+ * here, anyway.
+ */
+ # define CALL_NOSPEC \
+- ALTERNATIVE( \
++ ANNOTATE_NOSPEC_ALTERNATIVE \
++ ALTERNATIVE_2( \
+ ANNOTATE_RETPOLINE_SAFE \
+ "call *%[thunk_target]\n", \
+ " jmp 904f;\n" \
+@@ -199,7 +204,11 @@
+ " ret;\n" \
+ " .align 16\n" \
+ "904: call 901b;\n", \
+- X86_FEATURE_RETPOLINE)
++ X86_FEATURE_RETPOLINE, \
++ "lfence;\n" \
++ ANNOTATE_RETPOLINE_SAFE \
++ "call *%[thunk_target]\n", \
++ X86_FEATURE_RETPOLINE_AMD)
+
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+ #else /* No retpoline for C / inline asm */
--- /dev/null
+Subject: x86/speculation: Add command line control for indirect branch speculation
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun Nov 25 19:33:45 2018 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit fa1202ef224391b6f5b26cdd44cc50495e8fab54 upstream
+
+Add command line control for user space indirect branch speculation
+mitigations. The new option is: spectre_v2_user=
+
+The initial options are:
+
+ - on: Unconditionally enabled
+ - off: Unconditionally disabled
+ - auto: Kernel selects mitigation (default off for now)
+
+When the spectre_v2= command line argument is either 'on' or 'off', this
+implies that the application to application control follows that state even
+if a contradicting spectre_v2_user= argument is supplied.
+
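+For example, booting with (illustrative):
+
+	spectre_v2=auto spectre_v2_user=on
+
+keeps the kernel side selection automatic while unconditionally enabling
+the user space to user space mitigation.
+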
+Originally-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.082720373@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 32 +++++
+ arch/x86/include/asm/nospec-branch.h | 10 +
+ arch/x86/kernel/cpu/bugs.c | 133 ++++++++++++++++++++----
+ 3 files changed, 156 insertions(+), 19 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3994,9 +3994,13 @@
+
+ spectre_v2= [X86] Control mitigation of Spectre variant 2
+ (indirect branch speculation) vulnerability.
++ The default operation protects the kernel from
++ user space attacks.
+
+- on - unconditionally enable
+- off - unconditionally disable
++ on - unconditionally enable, implies
++ spectre_v2_user=on
++ off - unconditionally disable, implies
++ spectre_v2_user=off
+ auto - kernel detects whether your CPU model is
+ vulnerable
+
+@@ -4006,6 +4010,12 @@
+ CONFIG_RETPOLINE configuration option, and the
+ compiler with which the kernel was built.
+
++ Selecting 'on' will also enable the mitigation
++ against user space to user space task attacks.
++
++ Selecting 'off' will disable both the kernel and
++ the user space protections.
++
+ Specific mitigations can also be selected manually:
+
+ retpoline - replace indirect branches
+@@ -4015,6 +4025,24 @@
+ Not specifying this option is equivalent to
+ spectre_v2=auto.
+
++ spectre_v2_user=
++ [X86] Control mitigation of Spectre variant 2
++ (indirect branch speculation) vulnerability between
++ user space tasks
++
++ on - Unconditionally enable mitigations. Is
++ enforced by spectre_v2=on
++
++ off - Unconditionally disable mitigations. Is
++ enforced by spectre_v2=off
++
++ auto - Kernel selects the mitigation depending on
++ the available CPU features and vulnerability.
++ Default is off.
++
++ Not specifying this option is equivalent to
++ spectre_v2_user=auto.
++
+ spec_store_bypass_disable=
+ [HW] Control Speculative Store Bypass (SSB) Disable mitigation
+ (Speculative Store Bypass vulnerability)
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -3,6 +3,8 @@
+ #ifndef _ASM_X86_NOSPEC_BRANCH_H_
+ #define _ASM_X86_NOSPEC_BRANCH_H_
+
++#include <linux/static_key.h>
++
+ #include <asm/alternative.h>
+ #include <asm/alternative-asm.h>
+ #include <asm/cpufeatures.h>
+@@ -226,6 +228,12 @@ enum spectre_v2_mitigation {
+ SPECTRE_V2_IBRS_ENHANCED,
+ };
+
++/* The indirect branch speculation control variants */
++enum spectre_v2_user_mitigation {
++ SPECTRE_V2_USER_NONE,
++ SPECTRE_V2_USER_STRICT,
++};
++
+ /* The Speculative Store Bypass disable variants */
+ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+@@ -303,6 +311,8 @@ do { \
+ preempt_enable(); \
+ } while (0)
+
++DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
++
+ #endif /* __ASSEMBLY__ */
+
+ /*
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -53,6 +53,9 @@ static u64 __ro_after_init x86_spec_ctrl
+ u64 __ro_after_init x86_amd_ls_cfg_base;
+ u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
+
++/* Control conditional STIBP in switch_to() */
++DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -198,6 +201,9 @@ static void x86_amd_ssb_disable(void)
+ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+ SPECTRE_V2_NONE;
+
++static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
++ SPECTRE_V2_USER_NONE;
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+@@ -236,6 +242,104 @@ enum spectre_v2_mitigation_cmd {
+ SPECTRE_V2_CMD_RETPOLINE_AMD,
+ };
+
++enum spectre_v2_user_cmd {
++ SPECTRE_V2_USER_CMD_NONE,
++ SPECTRE_V2_USER_CMD_AUTO,
++ SPECTRE_V2_USER_CMD_FORCE,
++};
++
++static const char * const spectre_v2_user_strings[] = {
++ [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
++ [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
++};
++
++static const struct {
++ const char *option;
++ enum spectre_v2_user_cmd cmd;
++ bool secure;
++} v2_user_options[] __initdata = {
++ { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
++ { "off", SPECTRE_V2_USER_CMD_NONE, false },
++ { "on", SPECTRE_V2_USER_CMD_FORCE, true },
++};
++
++static void __init spec_v2_user_print_cond(const char *reason, bool secure)
++{
++ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
++ pr_info("spectre_v2_user=%s forced on command line.\n", reason);
++}
++
++static enum spectre_v2_user_cmd __init
++spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
++{
++ char arg[20];
++ int ret, i;
++
++ switch (v2_cmd) {
++ case SPECTRE_V2_CMD_NONE:
++ return SPECTRE_V2_USER_CMD_NONE;
++ case SPECTRE_V2_CMD_FORCE:
++ return SPECTRE_V2_USER_CMD_FORCE;
++ default:
++ break;
++ }
++
++ ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
++ arg, sizeof(arg));
++ if (ret < 0)
++ return SPECTRE_V2_USER_CMD_AUTO;
++
++ for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
++ if (match_option(arg, ret, v2_user_options[i].option)) {
++ spec_v2_user_print_cond(v2_user_options[i].option,
++ v2_user_options[i].secure);
++ return v2_user_options[i].cmd;
++ }
++ }
++
++ pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
++ return SPECTRE_V2_USER_CMD_AUTO;
++}
++
++static void __init
++spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
++{
++ enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
++ bool smt_possible = IS_ENABLED(CONFIG_SMP);
++
++ if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
++ return;
++
++ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
++ cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
++ smt_possible = false;
++
++ switch (spectre_v2_parse_user_cmdline(v2_cmd)) {
++ case SPECTRE_V2_USER_CMD_AUTO:
++ case SPECTRE_V2_USER_CMD_NONE:
++ goto set_mode;
++ case SPECTRE_V2_USER_CMD_FORCE:
++ mode = SPECTRE_V2_USER_STRICT;
++ break;
++ }
++
++ /* Initialize Indirect Branch Prediction Barrier */
++ if (boot_cpu_has(X86_FEATURE_IBPB)) {
++ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
++ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
++ }
++
++	/* If enhanced IBRS is enabled no STIBP required */
++ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++ return;
++
++set_mode:
++ spectre_v2_user = mode;
++ /* Only print the STIBP mode when SMT possible */
++ if (smt_possible)
++ pr_info("%s\n", spectre_v2_user_strings[mode]);
++}
++
+ static const char * const spectre_v2_strings[] = {
+ [SPECTRE_V2_NONE] = "Vulnerable",
+ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
+@@ -382,12 +486,6 @@ specv2_set_mode:
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+
+- /* Initialize Indirect Branch Prediction Barrier if supported */
+- if (boot_cpu_has(X86_FEATURE_IBPB)) {
+- setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+- pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
+- }
+-
+ /*
+ * Retpoline means the kernel is safe because it has no indirect
+ * branches. Enhanced IBRS protects firmware too, so, enable restricted
+@@ -404,23 +502,21 @@ specv2_set_mode:
+ pr_info("Enabling Restricted Speculation for firmware calls\n");
+ }
+
++ /* Set up IBPB and STIBP depending on the general spectre V2 command */
++ spectre_v2_user_select_mitigation(cmd);
++
+ /* Enable STIBP if appropriate */
+ arch_smt_update();
+ }
+
+ static bool stibp_needed(void)
+ {
+- if (spectre_v2_enabled == SPECTRE_V2_NONE)
+- return false;
+-
+ /* Enhanced IBRS makes using STIBP unnecessary. */
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return false;
+
+- if (!boot_cpu_has(X86_FEATURE_STIBP))
+- return false;
+-
+- return true;
++ /* Check for strict user mitigation mode */
++ return spectre_v2_user == SPECTRE_V2_USER_STRICT;
+ }
+
+ static void update_stibp_msr(void *info)
+@@ -841,10 +937,13 @@ static char *stibp_state(void)
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return "";
+
+- if (x86_spec_ctrl_base & SPEC_CTRL_STIBP)
+- return ", STIBP";
+- else
+- return "";
++ switch (spectre_v2_user) {
++ case SPECTRE_V2_USER_NONE:
++ return ", STIBP: disabled";
++ case SPECTRE_V2_USER_STRICT:
++ return ", STIBP: forced";
++ }
++ return "";
+ }
+
+ static char *ibpb_state(void)
--- /dev/null
+Subject: x86/speculation: Add prctl() control for indirect branch speculation
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:53 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 9137bb27e60e554dab694eafa4cca241fa3a694f upstream
+
+Add the PR_SPEC_INDIRECT_BRANCH option for the PR_GET_SPECULATION_CTRL and
+PR_SET_SPECULATION_CTRL prctls to allow fine grained per task control of
+indirect branch speculation via STIBP and IBPB.
+
+Invocations:
+ Check indirect branch speculation status with
+ - prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
+
+ Enable indirect branch speculation with
+ - prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
+
+ Disable indirect branch speculation with
+ - prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
+
+ Force disable indirect branch speculation with
+ - prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
+
+See Documentation/userspace-api/spec_ctrl.rst.
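+
+As a rough illustration, a minimal user space sketch (assuming uapi headers
+which expose prctl(2); the fallback defines mirror the values added by this
+patch and are only needed with older headers):
+
+  #include <stdio.h>
+  #include <sys/prctl.h>
+
+  #ifndef PR_GET_SPECULATION_CTRL
+  # define PR_GET_SPECULATION_CTRL	52
+  # define PR_SET_SPECULATION_CTRL	53
+  #endif
+  #ifndef PR_SPEC_INDIRECT_BRANCH
+  # define PR_SPEC_INDIRECT_BRANCH	1
+  #endif
+  #ifndef PR_SPEC_DISABLE
+  # define PR_SPEC_DISABLE		(1UL << 2)
+  #endif
+
+  int main(void)
+  {
+	/* Query the current indirect branch speculation state */
+	int state = prctl(PR_GET_SPECULATION_CTRL,
+			  PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
+
+	if (state < 0)
+		perror("PR_GET_SPECULATION_CTRL");
+	else
+		printf("ib speculation state: 0x%x\n", state);
+
+	/* Restrict indirect branch speculation for this task */
+	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+		  PR_SPEC_DISABLE, 0, 0))
+		perror("PR_SET_SPECULATION_CTRL");
+	return 0;
+  }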
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.866780996@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/userspace-api/spec_ctrl.rst | 9 ++++
+ arch/x86/include/asm/nospec-branch.h | 1
+ arch/x86/kernel/cpu/bugs.c | 67 ++++++++++++++++++++++++++++++
+ arch/x86/kernel/process.c | 5 ++
+ include/linux/sched.h | 9 ++++
+ include/uapi/linux/prctl.h | 1
+ 6 files changed, 92 insertions(+)
+
+--- a/Documentation/userspace-api/spec_ctrl.rst
++++ b/Documentation/userspace-api/spec_ctrl.rst
+@@ -92,3 +92,12 @@ Speculation misfeature controls
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
++
++- PR_SPEC_INDIRECT_BRANCH: Indirect Branch Speculation in User Processes
++ (Mitigate Spectre V2 style attacks against user processes)
++
++ Invocations:
++ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -232,6 +232,7 @@ enum spectre_v2_mitigation {
+ enum spectre_v2_user_mitigation {
+ SPECTRE_V2_USER_NONE,
+ SPECTRE_V2_USER_STRICT,
++ SPECTRE_V2_USER_PRCTL,
+ };
+
+ /* The Speculative Store Bypass disable variants */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -563,6 +563,8 @@ void arch_smt_update(void)
+ case SPECTRE_V2_USER_STRICT:
+ update_stibp_strict();
+ break;
++ case SPECTRE_V2_USER_PRCTL:
++ break;
+ }
+
+ mutex_unlock(&spec_ctrl_mutex);
+@@ -749,12 +751,50 @@ static int ssb_prctl_set(struct task_str
+ return 0;
+ }
+
++static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
++{
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++ return 0;
++ /*
++ * Indirect branch speculation is always disabled in strict
++ * mode.
++ */
++ if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
++ return -EPERM;
++ task_clear_spec_ib_disable(task);
++ task_update_spec_tif(task);
++ break;
++ case PR_SPEC_DISABLE:
++ case PR_SPEC_FORCE_DISABLE:
++ /*
++ * Indirect branch speculation is always allowed when
++ * mitigation is force disabled.
++ */
++ if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++ return -EPERM;
++ if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
++ return 0;
++ task_set_spec_ib_disable(task);
++ if (ctrl == PR_SPEC_FORCE_DISABLE)
++ task_set_spec_ib_force_disable(task);
++ task_update_spec_tif(task);
++ break;
++ default:
++ return -ERANGE;
++ }
++ return 0;
++}
++
+ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
+ {
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssb_prctl_set(task, ctrl);
++ case PR_SPEC_INDIRECT_BRANCH:
++ return ib_prctl_set(task, ctrl);
+ default:
+ return -ENODEV;
+ }
+@@ -787,11 +827,34 @@ static int ssb_prctl_get(struct task_str
+ }
+ }
+
++static int ib_prctl_get(struct task_struct *task)
++{
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++ return PR_SPEC_NOT_AFFECTED;
++
++ switch (spectre_v2_user) {
++ case SPECTRE_V2_USER_NONE:
++ return PR_SPEC_ENABLE;
++ case SPECTRE_V2_USER_PRCTL:
++ if (task_spec_ib_force_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
++ if (task_spec_ib_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
++ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
++ case SPECTRE_V2_USER_STRICT:
++ return PR_SPEC_DISABLE;
++ default:
++ return PR_SPEC_NOT_AFFECTED;
++ }
++}
++
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+ {
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssb_prctl_get(task);
++ case PR_SPEC_INDIRECT_BRANCH:
++ return ib_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+@@ -971,6 +1034,8 @@ static char *stibp_state(void)
+ return ", STIBP: disabled";
+ case SPECTRE_V2_USER_STRICT:
+ return ", STIBP: forced";
++ case SPECTRE_V2_USER_PRCTL:
++ return "";
+ }
+ return "";
+ }
+@@ -983,6 +1048,8 @@ static char *ibpb_state(void)
+ return ", IBPB: disabled";
+ case SPECTRE_V2_USER_STRICT:
+ return ", IBPB: always-on";
++ case SPECTRE_V2_USER_PRCTL:
++ return "";
+ }
+ }
+ return "";
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -453,6 +453,11 @@ static unsigned long speculation_ctrl_up
+ set_tsk_thread_flag(tsk, TIF_SSBD);
+ else
+ clear_tsk_thread_flag(tsk, TIF_SSBD);
++
++ if (task_spec_ib_disable(tsk))
++ set_tsk_thread_flag(tsk, TIF_SPEC_IB);
++ else
++ clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
+ }
+ /* Return the updated threadinfo flags*/
+ return task_thread_info(tsk)->flags;
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1405,6 +1405,8 @@ static inline bool is_percpu_thread(void
+ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+ #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
+ #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
++#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
++#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
+
+ #define TASK_PFA_TEST(name, func) \
+ static inline bool task_##func(struct task_struct *p) \
+@@ -1436,6 +1438,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ss
+ TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+ TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+
++TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
++TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
++TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
++
++TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
++TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
++
+ static inline void
+ current_restore_flags(unsigned long orig_flags, unsigned long flags)
+ {
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -203,6 +203,7 @@ struct prctl_mm_map {
+ #define PR_SET_SPECULATION_CTRL 53
+ /* Speculation control variants */
+ # define PR_SPEC_STORE_BYPASS 0
++# define PR_SPEC_INDIRECT_BRANCH 1
+ /* Return and control values for PR_SET/GET_SPECULATION_CTRL */
+ # define PR_SPEC_NOT_AFFECTED 0
+ # define PR_SPEC_PRCTL (1UL << 0)
--- /dev/null
+Subject: x86/speculation: Add seccomp Spectre v2 user space protection mode
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:55 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 6b3e64c237c072797a9ec918654a60e3a46488e2 upstream
+
+If 'prctl' mode of user space protection from spectre v2 is selected
+on the kernel command-line, STIBP and IBPB are applied on tasks which
+restrict their indirect branch speculation via prctl.
+
+SECCOMP enables the SSBD mitigation for sandboxed tasks already, so it
+makes sense to prevent spectre v2 user space to user space attacks as
+well.
+
+The Intel mitigation guide documents how STIBP works:
+
+ Setting bit 1 (STIBP) of the IA32_SPEC_CTRL MSR on a logical processor
+ prevents the predicted targets of indirect branches on any logical
+ processor of that core from being controlled by software that executes
+ (or executed previously) on another logical processor of the same core.
+
+Ergo setting STIBP protects the task itself from being attacked by a task
+running on a different hyper-thread and protects the tasks running on
+different hyper-threads from being attacked.
+
+While the document suggests that the branch predictors are shielded between
+the logical processors, the observed performance regressions suggest that
+STIBP simply disables the branch predictor more or less completely. Of
+course the document wording is vague, but the fact that there is also no
+requirement for issuing IBPB when STIBP is used points clearly in that
+direction. The kernel still issues IBPB even when STIBP is used until Intel
+clarifies the whole mechanism.
+
+IBPB is issued when the task switches out, so malicious sandbox code cannot
+mistrain the branch predictor for the next user space task on the same
+logical processor.
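+
+A hedged user space sketch of the resulting behaviour (hypothetical test
+program, assuming a kernel booted with spectre_v2_user=seccomp and uapi
+headers providing the PR_SPEC_* constants):
+
+  #include <stdio.h>
+  #include <sys/prctl.h>
+  #include <linux/filter.h>
+  #include <linux/seccomp.h>
+
+  #ifndef PR_SPEC_INDIRECT_BRANCH
+  # define PR_SPEC_INDIRECT_BRANCH	1
+  #endif
+
+  int main(void)
+  {
+	/* Minimal allow-everything filter, just to enter seccomp mode */
+	struct sock_filter insns[] = {
+		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
+	};
+	struct sock_fprog prog = {
+		.len = sizeof(insns) / sizeof(insns[0]),
+		.filter = insns,
+	};
+
+	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
+
+	/* Expected to now report PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE */
+	printf("ib state after seccomp: 0x%x\n",
+	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+		     0, 0, 0));
+	return 0;
+  }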
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185006.051663132@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 9 ++++++++-
+ arch/x86/include/asm/nospec-branch.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 17 ++++++++++++++++-
+ 3 files changed, 25 insertions(+), 2 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4041,9 +4041,16 @@
+ per thread. The mitigation control state
+ is inherited on fork.
+
++ seccomp
++ - Same as "prctl" above, but all seccomp
++ threads will enable the mitigation unless
++ they explicitly opt out.
++
+ auto - Kernel selects the mitigation depending on
+ the available CPU features and vulnerability.
+- Default is prctl.
++
++ Default mitigation:
++ If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
+
+ Not specifying this option is equivalent to
+ spectre_v2_user=auto.
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -233,6 +233,7 @@ enum spectre_v2_user_mitigation {
+ SPECTRE_V2_USER_NONE,
+ SPECTRE_V2_USER_STRICT,
+ SPECTRE_V2_USER_PRCTL,
++ SPECTRE_V2_USER_SECCOMP,
+ };
+
+ /* The Speculative Store Bypass disable variants */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -255,12 +255,14 @@ enum spectre_v2_user_cmd {
+ SPECTRE_V2_USER_CMD_AUTO,
+ SPECTRE_V2_USER_CMD_FORCE,
+ SPECTRE_V2_USER_CMD_PRCTL,
++ SPECTRE_V2_USER_CMD_SECCOMP,
+ };
+
+ static const char * const spectre_v2_user_strings[] = {
+ [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
+ [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
+ [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
++ [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
+ };
+
+ static const struct {
+@@ -272,6 +274,7 @@ static const struct {
+ { "off", SPECTRE_V2_USER_CMD_NONE, false },
+ { "on", SPECTRE_V2_USER_CMD_FORCE, true },
+ { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
++ { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
+ };
+
+ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
+@@ -331,10 +334,16 @@ spectre_v2_user_select_mitigation(enum s
+ case SPECTRE_V2_USER_CMD_FORCE:
+ mode = SPECTRE_V2_USER_STRICT;
+ break;
+- case SPECTRE_V2_USER_CMD_AUTO:
+ case SPECTRE_V2_USER_CMD_PRCTL:
+ mode = SPECTRE_V2_USER_PRCTL;
+ break;
++ case SPECTRE_V2_USER_CMD_AUTO:
++ case SPECTRE_V2_USER_CMD_SECCOMP:
++ if (IS_ENABLED(CONFIG_SECCOMP))
++ mode = SPECTRE_V2_USER_SECCOMP;
++ else
++ mode = SPECTRE_V2_USER_PRCTL;
++ break;
+ }
+
+ /* Initialize Indirect Branch Prediction Barrier */
+@@ -346,6 +355,7 @@ spectre_v2_user_select_mitigation(enum s
+ static_branch_enable(&switch_mm_always_ibpb);
+ break;
+ case SPECTRE_V2_USER_PRCTL:
++ case SPECTRE_V2_USER_SECCOMP:
+ static_branch_enable(&switch_mm_cond_ibpb);
+ break;
+ default:
+@@ -588,6 +598,7 @@ void arch_smt_update(void)
+ update_stibp_strict();
+ break;
+ case SPECTRE_V2_USER_PRCTL:
++ case SPECTRE_V2_USER_SECCOMP:
+ update_indir_branch_cond();
+ break;
+ }
+@@ -830,6 +841,8 @@ void arch_seccomp_spec_mitigate(struct t
+ {
+ if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
++ if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
++ ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ }
+ #endif
+
+@@ -861,6 +874,7 @@ static int ib_prctl_get(struct task_stru
+ case SPECTRE_V2_USER_NONE:
+ return PR_SPEC_ENABLE;
+ case SPECTRE_V2_USER_PRCTL:
++ case SPECTRE_V2_USER_SECCOMP:
+ if (task_spec_ib_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+ if (task_spec_ib_disable(task))
+@@ -1060,6 +1074,7 @@ static char *stibp_state(void)
+ case SPECTRE_V2_USER_STRICT:
+ return ", STIBP: forced";
+ case SPECTRE_V2_USER_PRCTL:
++ case SPECTRE_V2_USER_SECCOMP:
+ if (static_key_enabled(&switch_to_cond_stibp))
+ return ", STIBP: conditional";
+ }
--- /dev/null
+Subject: x86/speculation: Apply IBPB more strictly to avoid cross-process data leak
+From: Jiri Kosina jkosina@suse.cz
+Date: Tue Sep 25 14:38:18 2018 +0200
+
+From: Jiri Kosina jkosina@suse.cz
+
+commit dbfe2953f63c640463c630746cd5d9de8b2f63ae upstream
+
+Currently, IBPB is only issued in cases when switching into a non-dumpable
+process, the rationale being to protect such 'important and security
+sensitive' processes (such as GPG) from data leaking into a different
+userspace process via spectre v2.
+
+This is, however, completely insufficient to provide proper
+userspace-to-userspace spectrev2 protection, as any process can poison
+branch buffers before being scheduled out, and the newly scheduled process
+immediately becomes a spectrev2 victim.
+
+In order to minimize the performance impact (for use cases that do require
+spectrev2 protection), issue the barrier only in cases when switching between
+processes where the victim can't be ptraced by the potential attacker (as in
+such cases, the attacker doesn't have to bother with branch buffers at all).
+
+[ tglx: Split up PTRACE_MODE_NOACCESS_CHK into PTRACE_MODE_SCHED and
+ PTRACE_MODE_IBPB to be able to do ptrace() context tracking reasonably
+ fine-grained ]
+
+Fixes: 18bf3c3ea8 ("x86/speculation: Use Indirect Branch Prediction Barrier in context switch")
+Originally-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "WoodhouseDavid" <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: "SchauflerCasey" <casey.schaufler@intel.com>
+Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251437340.15880@cbobk.fhfr.pm
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/tlb.c | 31 ++++++++++++++++++++-----------
+ include/linux/ptrace.h | 21 +++++++++++++++++++--
+ kernel/ptrace.c | 10 ++++++++++
+ 3 files changed, 49 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -7,6 +7,7 @@
+ #include <linux/export.h>
+ #include <linux/cpu.h>
+ #include <linux/debugfs.h>
++#include <linux/ptrace.h>
+
+ #include <asm/tlbflush.h>
+ #include <asm/mmu_context.h>
+@@ -180,6 +181,19 @@ static void sync_current_stack_to_mm(str
+ }
+ }
+
++static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id)
++{
++ /*
++ * Check if the current (previous) task has access to the memory
++ * of the @tsk (next) task. If access is denied, make sure to
++ * issue an IBPB to stop user->user Spectre-v2 attacks.
++ *
++ * Note: __ptrace_may_access() returns 0 or -ERRNO.
++ */
++ return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id &&
++ ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB));
++}
++
+ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+ {
+@@ -256,18 +270,13 @@ void switch_mm_irqs_off(struct mm_struct
+ * one process from doing Spectre-v2 attacks on another.
+ *
+ * As an optimization, flush indirect branches only when
+- * switching into processes that disable dumping. This
+- * protects high value processes like gpg, without having
+- * too high performance overhead. IBPB is *expensive*!
+- *
+- * This will not flush branches when switching into kernel
+- * threads. It will also not flush if we switch to idle
+- * thread and back to the same process. It will flush if we
+- * switch to a different non-dumpable process.
++ * switching into a process that can't be ptraced by the
++ * current one (as in such a case, the attacker has a much
++ * more convenient way to tamper with the next process than
++ * branch buffer poisoning).
+ */
+- if (tsk && tsk->mm &&
+- tsk->mm->context.ctx_id != last_ctx_id &&
+- get_dumpable(tsk->mm) != SUID_DUMP_USER)
++ if (static_cpu_has(X86_FEATURE_USE_IBPB) &&
++ ibpb_needed(tsk, last_ctx_id))
+ indirect_branch_prediction_barrier();
+
+ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -62,14 +62,17 @@ extern void exit_ptrace(struct task_stru
+ #define PTRACE_MODE_READ 0x01
+ #define PTRACE_MODE_ATTACH 0x02
+ #define PTRACE_MODE_NOAUDIT 0x04
+-#define PTRACE_MODE_FSCREDS 0x08
+-#define PTRACE_MODE_REALCREDS 0x10
++#define PTRACE_MODE_FSCREDS 0x08
++#define PTRACE_MODE_REALCREDS 0x10
++#define PTRACE_MODE_SCHED 0x20
++#define PTRACE_MODE_IBPB 0x40
+
+ /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
+ #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
+ #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
+ #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
+ #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
++#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
+
+ /**
+ * ptrace_may_access - check whether the caller is permitted to access
+@@ -87,6 +90,20 @@ extern void exit_ptrace(struct task_stru
+ */
+ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
+
++/**
++ * ptrace_may_access - check whether the caller is permitted to access
++ * a target task.
++ * @task: target task
++ * @mode: selects type of access and caller credentials
++ *
++ * Returns true on success, false on denial.
++ *
++ * Similar to ptrace_may_access(). Only to be called from context switch
++ * code. Does not call into audit and the regular LSM hooks due to locking
++ * constraints.
++ */
++extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
++
+ static inline int ptrace_reparented(struct task_struct *child)
+ {
+ return !same_thread_group(child->real_parent, child->parent);
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -261,6 +261,9 @@ static int ptrace_check_attach(struct ta
+
+ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+ {
++ if (mode & PTRACE_MODE_SCHED)
++ return false;
++
+ if (mode & PTRACE_MODE_NOAUDIT)
+ return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
+ else
+@@ -328,9 +331,16 @@ ok:
+ !ptrace_has_cap(mm->user_ns, mode)))
+ return -EPERM;
+
++ if (mode & PTRACE_MODE_SCHED)
++ return 0;
+ return security_ptrace_access_check(task, mode);
+ }
+
++bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
++{
++ return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
++}
++
+ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
+ {
+ int err;
--- /dev/null
+Subject: x86/speculation: Avoid __switch_to_xtra() calls
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:48 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 5635d99953f04b550738f6f4c1c532667c3fd872 upstream
+
+The TIF_SPEC_IB bit does not need to be evaluated in the decision to invoke
+__switch_to_xtra() when:
+
+ - CONFIG_SMP is disabled
+
+ - The conditional STIBP mode is disabled
+
+The TIF_SPEC_IB bit still controls IBPB in both cases so the TIF work mask
+checks might invoke __switch_to_xtra() for nothing if TIF_SPEC_IB is the
+only set bit in the work masks.
+
+Optimize it out by masking the bit at compile time for CONFIG_SMP=n and at
+run time when the static key controlling the conditional STIBP mode is
+disabled.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.374062201@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/thread_info.h | 13 +++++++++++--
+ arch/x86/kernel/process.h | 15 +++++++++++++++
+ 2 files changed, 26 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -149,9 +149,18 @@ struct thread_info {
+ _TIF_FSCHECK)
+
+ /* flags to check in __switch_to() */
+-#define _TIF_WORK_CTXSW \
++#define _TIF_WORK_CTXSW_BASE \
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
+- _TIF_SSBD|_TIF_SPEC_IB)
++ _TIF_SSBD)
++
++/*
++ * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
++ */
++#ifdef CONFIG_SMP
++# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
++#else
++# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE)
++#endif
+
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+--- a/arch/x86/kernel/process.h
++++ b/arch/x86/kernel/process.h
+@@ -2,6 +2,8 @@
+ //
+ // Code shared between 32 and 64 bit
+
++#include <asm/spec-ctrl.h>
++
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
+
+ /*
+@@ -14,6 +16,19 @@ static inline void switch_to_extra(struc
+ unsigned long next_tif = task_thread_info(next)->flags;
+ unsigned long prev_tif = task_thread_info(prev)->flags;
+
++ if (IS_ENABLED(CONFIG_SMP)) {
++ /*
++ * Avoid __switch_to_xtra() invocation when conditional
++ * STIBP is disabled and the only different bit is
++ * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
++ * in the TIF_WORK_CTXSW masks.
++ */
++ if (!static_branch_likely(&switch_to_cond_stibp)) {
++ prev_tif &= ~_TIF_SPEC_IB;
++ next_tif &= ~_TIF_SPEC_IB;
++ }
++ }
++
+ /*
+ * __switch_to_xtra() handles debug registers, i/o bitmaps,
+ * speculation mitigations etc.
--- /dev/null
+Subject: x86/speculation: Clean up spectre_v2_parse_cmdline()
+From: Tim Chen tim.c.chen@linux.intel.com
+Date: Sun Nov 25 19:33:30 2018 +0100
+
+From: Tim Chen tim.c.chen@linux.intel.com
+
+commit 24848509aa55eac39d524b587b051f4e86df3c12 upstream
+
+Remove the unnecessary 'else' statement in spectre_v2_parse_cmdline()
+to save an indentation level.
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185003.688010903@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 27 +++++++++++++--------------
+ 1 file changed, 13 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -275,22 +275,21 @@ static enum spectre_v2_mitigation_cmd __
+
+ if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+ return SPECTRE_V2_CMD_NONE;
+- else {
+- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
+- if (ret < 0)
+- return SPECTRE_V2_CMD_AUTO;
+
+- for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
+- if (!match_option(arg, ret, mitigation_options[i].option))
+- continue;
+- cmd = mitigation_options[i].cmd;
+- break;
+- }
++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
++ if (ret < 0)
++ return SPECTRE_V2_CMD_AUTO;
+
+- if (i >= ARRAY_SIZE(mitigation_options)) {
+- pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+- return SPECTRE_V2_CMD_AUTO;
+- }
++ for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
++ if (!match_option(arg, ret, mitigation_options[i].option))
++ continue;
++ cmd = mitigation_options[i].cmd;
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(mitigation_options)) {
++ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
++ return SPECTRE_V2_CMD_AUTO;
+ }
+
+ if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
--- /dev/null
+Subject: x86/speculation: Disable STIBP when enhanced IBRS is in use
+From: Tim Chen tim.c.chen@linux.intel.com
+Date: Sun Nov 25 19:33:33 2018 +0100
+
+From: Tim Chen tim.c.chen@linux.intel.com
+
+commit 34bce7c9690b1d897686aac89604ba7adc365556 upstream
+
+If enhanced IBRS is active, STIBP is redundant for mitigating Spectre v2
+user space exploits from a hyperthread sibling.
+
+Disable STIBP when enhanced IBRS is used.
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185003.966801480@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -319,6 +319,10 @@ static bool stibp_needed(void)
+ if (spectre_v2_enabled == SPECTRE_V2_NONE)
+ return false;
+
++ /* Enhanced IBRS makes using STIBP unnecessary. */
++ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++ return false;
++
+ if (!boot_cpu_has(X86_FEATURE_STIBP))
+ return false;
+
+@@ -843,6 +847,9 @@ static ssize_t l1tf_show_state(char *buf
+
+ static char *stibp_state(void)
+ {
++ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++ return "";
++
+ if (x86_spec_ctrl_base & SPEC_CTRL_STIBP)
+ return ", STIBP";
+ else
--- /dev/null
+Subject: x86/speculation: Enable cross-hyperthread spectre v2 STIBP mitigation
+From: Jiri Kosina jkosina@suse.cz
+Date: Tue Sep 25 14:38:55 2018 +0200
+
+From: Jiri Kosina jkosina@suse.cz
+
+commit 53c613fe6349994f023245519265999eed75957f upstream
+
+STIBP is a feature provided by certain Intel ucodes / CPUs. This feature
+(once enabled) prevents cross-hyperthread control of decisions made by
+indirect branch predictors.
+
+Enable this feature if
+
+- the CPU is vulnerable to spectre v2
+- the CPU supports SMT and has SMT siblings online
+- spectre_v2 mitigation autoselection is enabled (default)
+
+After some previous discussion, this leaves STIBP on all the time, as wrmsr
+on crossing kernel boundary is a no-no. This could perhaps later be a bit
+more optimized (like disabling it in NOHZ, experimenting with disabling it in
+idle, etc) if needed.
+
+Note that the synchronization of the mask manipulation via newly added
+spec_ctrl_mutex is currently not strictly needed, as the only updater is
+already being serialized by cpu_add_remove_lock, but let's make this a
+little bit more future-proof.
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "WoodhouseDavid" <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: "SchauflerCasey" <casey.schaufler@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251438240.15880@cbobk.fhfr.pm
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 57 ++++++++++++++++++++++++++++++++++++++++-----
+ kernel/cpu.c | 11 +++++++-
+ 2 files changed, 61 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -34,12 +34,10 @@ static void __init spectre_v2_select_mit
+ static void __init ssb_select_mitigation(void);
+ static void __init l1tf_select_mitigation(void);
+
+-/*
+- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+- * writes to SPEC_CTRL contain whatever reserved bits have been set.
+- */
+-u64 __ro_after_init x86_spec_ctrl_base;
++/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
++u64 x86_spec_ctrl_base;
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
++static DEFINE_MUTEX(spec_ctrl_mutex);
+
+ /*
+ * The vendor and possibly platform specific bits which can be modified in
+@@ -324,6 +322,46 @@ static enum spectre_v2_mitigation_cmd __
+ return cmd;
+ }
+
++static bool stibp_needed(void)
++{
++ if (spectre_v2_enabled == SPECTRE_V2_NONE)
++ return false;
++
++ if (!boot_cpu_has(X86_FEATURE_STIBP))
++ return false;
++
++ return true;
++}
++
++static void update_stibp_msr(void *info)
++{
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++}
++
++void arch_smt_update(void)
++{
++ u64 mask;
++
++ if (!stibp_needed())
++ return;
++
++ mutex_lock(&spec_ctrl_mutex);
++ mask = x86_spec_ctrl_base;
++ if (cpu_smt_control == CPU_SMT_ENABLED)
++ mask |= SPEC_CTRL_STIBP;
++ else
++ mask &= ~SPEC_CTRL_STIBP;
++
++ if (mask != x86_spec_ctrl_base) {
++ pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
++ cpu_smt_control == CPU_SMT_ENABLED ?
++ "Enabling" : "Disabling");
++ x86_spec_ctrl_base = mask;
++ on_each_cpu(update_stibp_msr, NULL, 1);
++ }
++ mutex_unlock(&spec_ctrl_mutex);
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -423,6 +461,9 @@ specv2_set_mode:
+ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+ pr_info("Enabling Restricted Speculation for firmware calls\n");
+ }
++
++ /* Enable STIBP if appropriate */
++ arch_smt_update();
+ }
+
+ #undef pr_fmt
+@@ -813,6 +854,8 @@ static ssize_t l1tf_show_state(char *buf
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
++ int ret;
++
+ if (!boot_cpu_has_bug(bug))
+ return sprintf(buf, "Not affected\n");
+
+@@ -827,10 +870,12 @@ static ssize_t cpu_show_common(struct de
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
+ case X86_BUG_SPECTRE_V2:
+- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++ (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
+ spectre_v2_module_string());
++ return ret;
+
+ case X86_BUG_SPEC_STORE_BYPASS:
+ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2045,6 +2045,12 @@ static void cpuhp_online_cpu_device(unsi
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+ }
+
++/*
++ * Architectures that need SMT-specific errata handling during SMT hotplug
++ * should override this.
++ */
++void __weak arch_smt_update(void) { };
++
+ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ {
+ int cpu, ret = 0;
+@@ -2071,8 +2077,10 @@ static int cpuhp_smt_disable(enum cpuhp_
+ */
+ cpuhp_offline_cpu_device(cpu);
+ }
+- if (!ret)
++ if (!ret) {
+ cpu_smt_control = ctrlval;
++ arch_smt_update();
++ }
+ cpu_maps_update_done();
+ return ret;
+ }
+@@ -2083,6 +2091,7 @@ static int cpuhp_smt_enable(void)
+
+ cpu_maps_update_begin();
+ cpu_smt_control = CPU_SMT_ENABLED;
++ arch_smt_update();
+ for_each_present_cpu(cpu) {
+ /* Skip online CPUs and CPUs on offline nodes */
+ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
--- /dev/null
+Subject: x86/speculation: Enable prctl mode for spectre_v2_user
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:54 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 7cc765a67d8e04ef7d772425ca5a2a1e2b894c15 upstream
+
+Now that all prerequisites are in place:
+
+ - Add the prctl command line option
+
+ - Default the 'auto' mode to 'prctl'
+
+ - When SMT state changes, update the static key which controls the
+ conditional STIBP evaluation on context switch.
+
+ - At init update the static key which controls the conditional IBPB
+ evaluation on context switch.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.958421388@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 7 +++-
+ arch/x86/kernel/cpu/bugs.c | 41 ++++++++++++++++++------
+ 2 files changed, 38 insertions(+), 10 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4036,9 +4036,14 @@
+ off - Unconditionally disable mitigations. Is
+ enforced by spectre_v2=off
+
++ prctl - Indirect branch speculation is enabled,
++ but mitigation can be enabled via prctl
++ per thread. The mitigation control state
++ is inherited on fork.
++
+ auto - Kernel selects the mitigation depending on
+ the available CPU features and vulnerability.
+- Default is off.
++ Default is prctl.
+
+ Not specifying this option is equivalent to
+ spectre_v2_user=auto.
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -254,11 +254,13 @@ enum spectre_v2_user_cmd {
+ SPECTRE_V2_USER_CMD_NONE,
+ SPECTRE_V2_USER_CMD_AUTO,
+ SPECTRE_V2_USER_CMD_FORCE,
++ SPECTRE_V2_USER_CMD_PRCTL,
+ };
+
+ static const char * const spectre_v2_user_strings[] = {
+ [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
+ [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
++ [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
+ };
+
+ static const struct {
+@@ -269,6 +271,7 @@ static const struct {
+ { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
+ { "off", SPECTRE_V2_USER_CMD_NONE, false },
+ { "on", SPECTRE_V2_USER_CMD_FORCE, true },
++ { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
+ };
+
+ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
+@@ -323,12 +326,15 @@ spectre_v2_user_select_mitigation(enum s
+ smt_possible = false;
+
+ switch (spectre_v2_parse_user_cmdline(v2_cmd)) {
+- case SPECTRE_V2_USER_CMD_AUTO:
+ case SPECTRE_V2_USER_CMD_NONE:
+ goto set_mode;
+ case SPECTRE_V2_USER_CMD_FORCE:
+ mode = SPECTRE_V2_USER_STRICT;
+ break;
++ case SPECTRE_V2_USER_CMD_AUTO:
++ case SPECTRE_V2_USER_CMD_PRCTL:
++ mode = SPECTRE_V2_USER_PRCTL;
++ break;
+ }
+
+ /* Initialize Indirect Branch Prediction Barrier */
+@@ -339,6 +345,9 @@ spectre_v2_user_select_mitigation(enum s
+ case SPECTRE_V2_USER_STRICT:
+ static_branch_enable(&switch_mm_always_ibpb);
+ break;
++ case SPECTRE_V2_USER_PRCTL:
++ static_branch_enable(&switch_mm_cond_ibpb);
++ break;
+ default:
+ break;
+ }
+@@ -351,6 +360,12 @@ spectre_v2_user_select_mitigation(enum s
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return;
+
++ /*
++	 * If SMT is not possible or STIBP is not available clear the STIBP
++ * mode.
++ */
++ if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
++ mode = SPECTRE_V2_USER_NONE;
+ set_mode:
+ spectre_v2_user = mode;
+ /* Only print the STIBP mode when SMT possible */
+@@ -549,6 +564,15 @@ static void update_stibp_strict(void)
+ on_each_cpu(update_stibp_msr, NULL, 1);
+ }
+
++/* Update the static key controlling the evaluation of TIF_SPEC_IB */
++static void update_indir_branch_cond(void)
++{
++ if (sched_smt_active())
++ static_branch_enable(&switch_to_cond_stibp);
++ else
++ static_branch_disable(&switch_to_cond_stibp);
++}
++
+ void arch_smt_update(void)
+ {
+ /* Enhanced IBRS implies STIBP. No update required. */
+@@ -564,6 +588,7 @@ void arch_smt_update(void)
+ update_stibp_strict();
+ break;
+ case SPECTRE_V2_USER_PRCTL:
++ update_indir_branch_cond();
+ break;
+ }
+
+@@ -1035,7 +1060,8 @@ static char *stibp_state(void)
+ case SPECTRE_V2_USER_STRICT:
+ return ", STIBP: forced";
+ case SPECTRE_V2_USER_PRCTL:
+- return "";
++ if (static_key_enabled(&switch_to_cond_stibp))
++ return ", STIBP: conditional";
+ }
+ return "";
+ }
+@@ -1043,14 +1069,11 @@ static char *stibp_state(void)
+ static char *ibpb_state(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+- switch (spectre_v2_user) {
+- case SPECTRE_V2_USER_NONE:
+- return ", IBPB: disabled";
+- case SPECTRE_V2_USER_STRICT:
++ if (static_key_enabled(&switch_mm_always_ibpb))
+ return ", IBPB: always-on";
+- case SPECTRE_V2_USER_PRCTL:
+- return "";
+- }
++ if (static_key_enabled(&switch_mm_cond_ibpb))
++ return ", IBPB: conditional";
++ return ", IBPB: disabled";
+ }
+ return "";
+ }
--- /dev/null
+Subject: x86/speculation: Mark string arrays const correctly
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:42 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 8770709f411763884535662744a3786a1806afd3 upstream
+
+checkpatch.pl muttered when reshuffling the code:
+ WARNING: static const char * array should probably be static const char * const
+
+Fix up all the string arrays.
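+
+For reference, the distinction the warning is about (a generic illustration,
+not from the patch):
+
+  static const char *a[] = { "x" };	    /* strings const, a[0] writable */
+  static const char * const b[] = { "x" }; /* the pointers are const too   */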
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.800018931@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -236,7 +236,7 @@ enum spectre_v2_mitigation_cmd {
+ SPECTRE_V2_CMD_RETPOLINE_AMD,
+ };
+
+-static const char *spectre_v2_strings[] = {
++static const char * const spectre_v2_strings[] = {
+ [SPECTRE_V2_NONE] = "Vulnerable",
+ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
+ [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+@@ -473,7 +473,7 @@ enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_SECCOMP,
+ };
+
+-static const char *ssb_strings[] = {
++static const char * const ssb_strings[] = {
+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
+ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
+@@ -813,7 +813,7 @@ early_param("l1tf", l1tf_cmdline);
+ #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+-static const char *l1tf_vmx_states[] = {
++static const char * const l1tf_vmx_states[] = {
+ [VMENTER_L1D_FLUSH_AUTO] = "auto",
+ [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
+ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
--- /dev/null
+Subject: x86/speculation: Move STIPB/IBPB string conditionals out of cpu_show_common()
+From: Tim Chen tim.c.chen@linux.intel.com
+Date: Sun Nov 25 19:33:32 2018 +0100
+
+From: Tim Chen tim.c.chen@linux.intel.com
+
+commit a8f76ae41cd633ac00be1b3019b1eb4741be3828 upstream
+
+The Spectre V2 printout in cpu_show_common() handles conditionals for the
+various mitigation methods directly in the sprintf() argument list. That's
+hard to read and will become unreadable if more complex decisions need to
+be made for a particular method.
+
+Move the conditionals for STIBP and IBPB string selection into helper
+functions, so they can be extended later on.
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185003.874479208@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -841,6 +841,22 @@ static ssize_t l1tf_show_state(char *buf
+ }
+ #endif
+
++static char *stibp_state(void)
++{
++ if (x86_spec_ctrl_base & SPEC_CTRL_STIBP)
++ return ", STIBP";
++ else
++ return "";
++}
++
++static char *ibpb_state(void)
++{
++ if (boot_cpu_has(X86_FEATURE_USE_IBPB))
++ return ", IBPB";
++ else
++ return "";
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
+@@ -859,9 +875,9 @@ static ssize_t cpu_show_common(struct de
+
+ case X86_BUG_SPECTRE_V2:
+ return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+- boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
++ ibpb_state(),
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+- (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
++ stibp_state(),
+ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+ spectre_v2_module_string());
+
--- /dev/null
+Subject: x86/speculation: Prepare arch_smt_update() for PRCTL mode
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:52 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 6893a959d7fdebbab5f5aa112c277d5a44435ba1 upstream
+
+The upcoming fine-grained per-task STIBP control needs to be updated on CPU
+hotplug as well.
+
+Split out the code which controls the strict mode so the prctl control code
+can be added later. Mark the SMP function call argument __unused while at it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.759457117@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++---------------------
+ 1 file changed, 25 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -527,40 +527,44 @@ specv2_set_mode:
+ arch_smt_update();
+ }
+
+-static bool stibp_needed(void)
++static void update_stibp_msr(void * __unused)
+ {
+- /* Enhanced IBRS makes using STIBP unnecessary. */
+- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+- return false;
+-
+- /* Check for strict user mitigation mode */
+- return spectre_v2_user == SPECTRE_V2_USER_STRICT;
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ }
+
+-static void update_stibp_msr(void *info)
++/* Update x86_spec_ctrl_base in case SMT state changed. */
++static void update_stibp_strict(void)
+ {
+- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++ u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
++
++ if (sched_smt_active())
++ mask |= SPEC_CTRL_STIBP;
++
++ if (mask == x86_spec_ctrl_base)
++ return;
++
++ pr_info("Update user space SMT mitigation: STIBP %s\n",
++ mask & SPEC_CTRL_STIBP ? "always-on" : "off");
++ x86_spec_ctrl_base = mask;
++ on_each_cpu(update_stibp_msr, NULL, 1);
+ }
+
+ void arch_smt_update(void)
+ {
+- u64 mask;
+-
+- if (!stibp_needed())
++ /* Enhanced IBRS implies STIBP. No update required. */
++ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return;
+
+ mutex_lock(&spec_ctrl_mutex);
+
+- mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+- if (sched_smt_active())
+- mask |= SPEC_CTRL_STIBP;
+-
+- if (mask != x86_spec_ctrl_base) {
+- pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
+- mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
+- x86_spec_ctrl_base = mask;
+- on_each_cpu(update_stibp_msr, NULL, 1);
++ switch (spectre_v2_user) {
++ case SPECTRE_V2_USER_NONE:
++ break;
++ case SPECTRE_V2_USER_STRICT:
++ update_stibp_strict();
++ break;
+ }
++
+ mutex_unlock(&spec_ctrl_mutex);
+ }
+
--- /dev/null
+Subject: x86/speculation: Prepare for conditional IBPB in switch_mm()
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:49 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 4c71a2b6fd7e42814aa68a6dec88abf3b42ea573 upstream
+
+The IBPB speculation barrier is issued from switch_mm() when the kernel
+switches to a user space task with a different mm than the user space task
+which ran last on the same CPU.
+
+An additional optimization is to avoid IBPB when the incoming task can be
+ptraced by the outgoing task. This optimization only works when switching
+directly between two user space tasks. When switching from a kernel task to
+a user space task the optimization fails because the previous task cannot
+be accessed anymore. So in quite a few scenarios the optimization just adds
+overhead.
+
+The upcoming conditional IBPB support will issue IBPB only for user space
+tasks which have the TIF_SPEC_IB bit set. This requires to handle the
+following cases:
+
+ 1) Switch from a user space task (potential attacker) which has
+ TIF_SPEC_IB set to a user space task (potential victim) which has
+ TIF_SPEC_IB not set.
+
+ 2) Switch from a user space task (potential attacker) which has
+ TIF_SPEC_IB not set to a user space task (potential victim) which has
+ TIF_SPEC_IB set.
+
+This needs to be optimized for the case where IBPB can be avoided because
+only kernel threads ran in between user space tasks which belong to the
+same process.
+
+The current check of whether two tasks belong to the same context uses the
+tasks' context id. While correct, it is simpler to use the mm pointer
+because it allows the TIF_SPEC_IB bit to be mangled into it. The context id
+based mechanism requires extra storage, which creates worse code.
+
+When a task is scheduled out its TIF_SPEC_IB bit is mangled as bit 0 into
+the per CPU storage which is used to track the last user space mm which was
+running on a CPU. This bit can be used together with the TIF_SPEC_IB bit of
+the incoming task to make the decision whether IBPB needs to be issued or
+not to cover the two cases above.
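+
+Condensed, the scheme looks like this (a sketch mirroring the tlb.c hunk
+below):
+
+    /* Bit 0 of the stored mm pointer carries the TIF_SPEC_IB state. */
+    #define LAST_USER_MM_IBPB 0x1UL
+
+    static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+    {
+        unsigned long next_tif = task_thread_info(next)->flags;
+        unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
+
+        return (unsigned long)next->mm | ibpb;
+    }
+
+    /* IBPB is required when the mm differs and either side has the bit set. */
+    if (next_mm != prev_mm && (next_mm | prev_mm) & LAST_USER_MM_IBPB)
+        indirect_branch_prediction_barrier();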
+
+As conditional IBPB is going to be the default, remove the dubious ptrace
+check for the IBPB always case and simply issue IBPB whenever the process
+changes.
+
+Move the storage to a different place in the struct as the original one
+created a hole.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.466447057@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 2
+ arch/x86/include/asm/tlbflush.h | 8 +-
+ arch/x86/kernel/cpu/bugs.c | 29 +++++++-
+ arch/x86/mm/tlb.c | 114 ++++++++++++++++++++++++++---------
+ 4 files changed, 118 insertions(+), 35 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -312,6 +312,8 @@ do { \
+ } while (0)
+
+ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
++DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
++DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+ #endif /* __ASSEMBLY__ */
+
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -185,10 +185,14 @@ struct tlb_state {
+
+ #define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
++ /* Last user mm for optimizing IBPB */
++ union {
++ struct mm_struct *last_user_mm;
++ unsigned long last_user_mm_ibpb;
++ };
++
+ u16 loaded_mm_asid;
+ u16 next_asid;
+- /* last user mm's ctx id */
+- u64 last_ctx_id;
+
+ /*
+ * We can be in one of several states:
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -55,6 +55,10 @@ u64 __ro_after_init x86_amd_ls_cfg_ssbd_
+
+ /* Control conditional STIPB in switch_to() */
+ DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
++/* Control conditional IBPB in switch_mm() */
++DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
++/* Control unconditional IBPB in switch_mm() */
++DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+ void __init check_bugs(void)
+ {
+@@ -330,7 +334,17 @@ spectre_v2_user_select_mitigation(enum s
+ /* Initialize Indirect Branch Prediction Barrier */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+- pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
++
++ switch (mode) {
++ case SPECTRE_V2_USER_STRICT:
++ static_branch_enable(&switch_mm_always_ibpb);
++ break;
++ default:
++ break;
++ }
++
++ pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
++ mode == SPECTRE_V2_USER_STRICT ? "always-on" : "conditional");
+ }
+
+ /* If enhanced IBRS is enabled no STIPB required */
+@@ -952,10 +966,15 @@ static char *stibp_state(void)
+
+ static char *ibpb_state(void)
+ {
+- if (boot_cpu_has(X86_FEATURE_USE_IBPB))
+- return ", IBPB";
+- else
+- return "";
++ if (boot_cpu_has(X86_FEATURE_IBPB)) {
++ switch (spectre_v2_user) {
++ case SPECTRE_V2_USER_NONE:
++ return ", IBPB: disabled";
++ case SPECTRE_V2_USER_STRICT:
++ return ", IBPB: always-on";
++ }
++ }
++ return "";
+ }
+
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -7,7 +7,6 @@
+ #include <linux/export.h>
+ #include <linux/cpu.h>
+ #include <linux/debugfs.h>
+-#include <linux/ptrace.h>
+
+ #include <asm/tlbflush.h>
+ #include <asm/mmu_context.h>
+@@ -31,6 +30,12 @@
+ */
+
+ /*
++ * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
++ * stored in cpu_tlb_state.last_user_mm_ibpb.
++ */
++#define LAST_USER_MM_IBPB 0x1UL
++
++/*
+ * We get here when we do something requiring a TLB invalidation
+ * but could not go invalidate all of the contexts. We do the
+ * necessary invalidation by clearing out the 'ctx_id' which
+@@ -181,17 +186,87 @@ static void sync_current_stack_to_mm(str
+ }
+ }
+
+-static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id)
++static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
++{
++ unsigned long next_tif = task_thread_info(next)->flags;
++ unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
++
++ return (unsigned long)next->mm | ibpb;
++}
++
++static void cond_ibpb(struct task_struct *next)
+ {
++ if (!next || !next->mm)
++ return;
++
+ /*
+- * Check if the current (previous) task has access to the memory
+- * of the @tsk (next) task. If access is denied, make sure to
+- * issue a IBPB to stop user->user Spectre-v2 attacks.
+- *
+- * Note: __ptrace_may_access() returns 0 or -ERRNO.
++ * Both, the conditional and the always IBPB mode use the mm
++ * pointer to avoid the IBPB when switching between tasks of the
++ * same process. Using the mm pointer instead of mm->context.ctx_id
++ * opens a hypothetical hole vs. mm_struct reuse, which is more or
++ * less impossible to control by an attacker. Aside of that it
++ * would only affect the first schedule so the theoretically
++ * exposed data is not really interesting.
+ */
+- return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id &&
+- ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB));
++ if (static_branch_likely(&switch_mm_cond_ibpb)) {
++ unsigned long prev_mm, next_mm;
++
++ /*
++ * This is a bit more complex than the always mode because
++ * it has to handle two cases:
++ *
++ * 1) Switch from a user space task (potential attacker)
++ * which has TIF_SPEC_IB set to a user space task
++ * (potential victim) which has TIF_SPEC_IB not set.
++ *
++ * 2) Switch from a user space task (potential attacker)
++ * which has TIF_SPEC_IB not set to a user space task
++ * (potential victim) which has TIF_SPEC_IB set.
++ *
++ * This could be done by unconditionally issuing IBPB when
++ * a task which has TIF_SPEC_IB set is either scheduled in
++ * or out. Though that results in two flushes when:
++ *
++ * - the same user space task is scheduled out and later
++ * scheduled in again and only a kernel thread ran in
++ * between.
++ *
++ * - a user space task belonging to the same process is
++ * scheduled in after a kernel thread ran in between
++ *
++ * - a user space task belonging to the same process is
++ * scheduled in immediately.
++ *
++ * Optimize this with reasonably small overhead for the
++ * above cases. Mangle the TIF_SPEC_IB bit into the mm
++ * pointer of the incoming task which is stored in
++ * cpu_tlbstate.last_user_mm_ibpb for comparison.
++ */
++ next_mm = mm_mangle_tif_spec_ib(next);
++ prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
++
++ /*
++ * Issue IBPB only if the mm's are different and one or
++ * both have the IBPB bit set.
++ */
++ if (next_mm != prev_mm &&
++ (next_mm | prev_mm) & LAST_USER_MM_IBPB)
++ indirect_branch_prediction_barrier();
++
++ this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
++ }
++
++ if (static_branch_unlikely(&switch_mm_always_ibpb)) {
++ /*
++ * Only flush when switching to a user space task with a
++ * different context than the user space task which ran
++ * last on this CPU.
++ */
++ if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
++ indirect_branch_prediction_barrier();
++ this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
++ }
++ }
+ }
+
+ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+@@ -262,22 +337,13 @@ void switch_mm_irqs_off(struct mm_struct
+ } else {
+ u16 new_asid;
+ bool need_flush;
+- u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
+
+ /*
+ * Avoid user/user BTB poisoning by flushing the branch
+ * predictor when switching between processes. This stops
+ * one process from doing Spectre-v2 attacks on another.
+- *
+- * As an optimization, flush indirect branches only when
+- * switching into a processes that can't be ptrace by the
+- * current one (as in such case, attacker has much more
+- * convenient way how to tamper with the next process than
+- * branch buffer poisoning).
+ */
+- if (static_cpu_has(X86_FEATURE_USE_IBPB) &&
+- ibpb_needed(tsk, last_ctx_id))
+- indirect_branch_prediction_barrier();
++ cond_ibpb(tsk);
+
+ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+ /*
+@@ -327,14 +393,6 @@ void switch_mm_irqs_off(struct mm_struct
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+ }
+
+- /*
+- * Record last user mm's context id, so we can avoid
+- * flushing branch buffer with IBPB if we switch back
+- * to the same user.
+- */
+- if (next != &init_mm)
+- this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+-
+ /* Make sure we write CR3 before loaded_mm. */
+ barrier();
+
+@@ -415,7 +473,7 @@ void initialize_tlbstate_and_flush(void)
+ write_cr3(build_cr3(mm->pgd, 0));
+
+ /* Reinitialize tlbstate. */
+- this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id);
++ this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
+ this_cpu_write(cpu_tlbstate.next_asid, 1);
+ this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
--- /dev/null
+Subject: x86/speculation: Prepare for per task indirect branch speculation control
+From: Tim Chen tim.c.chen@linux.intel.com
+Date: Sun Nov 25 19:33:46 2018 +0100
+
+From: Tim Chen tim.c.chen@linux.intel.com
+
+commit 5bfbe3ad5840d941b89bcac54b821ba14f50a0ba upstream
+
+To avoid the overhead of STIBP being always on, it is necessary to allow
+per-task control of STIBP.
+
+Add a new task flag TIF_SPEC_IB and evaluate it during context switch if
+SMT is active and flag evaluation is enabled by the speculation control
+code. Add the conditional evaluation to x86_virt_spec_ctrl() as well so the
+guest/host switch works properly.
+
+This has no effect because TIF_SPEC_IB cannot be set yet and the static key
+which controls evaluation is off. This is a preparatory patch for adding the
+control code.
+
+[ tglx: Simplify the context switch logic and make the TIF evaluation
+ depend on SMP=y and on the static key controlling the conditional
+ update. Rename it to TIF_SPEC_IB because it controls both STIBP and
+ IBPB ]
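+
+As a worked example of the TIF to MSR bit translation added below:
+TIF_SPEC_IB is bit 9 and STIBP is bit 1 of SPEC_CTRL, so the flag word is
+shifted right by (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT) = 8 positions to move
+the TIF bit into its MSR position:
+
+    static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+    {
+        BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+        return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+    }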
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.176917199@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 5 +++--
+ arch/x86/include/asm/spec-ctrl.h | 12 ++++++++++++
+ arch/x86/include/asm/thread_info.h | 5 ++++-
+ arch/x86/kernel/cpu/bugs.c | 4 ++++
+ arch/x86/kernel/process.c | 20 ++++++++++++++++++--
+ 5 files changed, 41 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -41,9 +41,10 @@
+
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+-#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
++#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
++#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
+ #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
+-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
++#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(
+ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+ }
+
++static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
++{
++ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
++ return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
++}
++
+ static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
+ {
+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+ return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+ }
+
++static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
++{
++ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
++ return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
++}
++
+ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+ {
+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -85,6 +85,7 @@ struct thread_info {
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
++#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
+ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+ #define TIF_UPROBE 12 /* breakpointed or singlestepping */
+ #define TIF_PATCH_PENDING 13 /* pending live patching update */
+@@ -112,6 +113,7 @@ struct thread_info {
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
++#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
+ #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
+@@ -148,7 +150,8 @@ struct thread_info {
+
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW \
+- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
++ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
++ _TIF_SSBD|_TIF_SPEC_IB)
+
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -147,6 +147,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
+ static_cpu_has(X86_FEATURE_AMD_SSBD))
+ hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
++ /* Conditional STIBP enabled? */
++ if (static_branch_unlikely(&switch_to_cond_stibp))
++ hostval |= stibp_tif_to_spec_ctrl(ti->flags);
++
+ if (hostval != guestval) {
+ msrval = setguest ? guestval : hostval;
+ wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -407,11 +407,17 @@ static __always_inline void amd_set_ssb_
+ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+ unsigned long tifn)
+ {
++ unsigned long tif_diff = tifp ^ tifn;
+ u64 msr = x86_spec_ctrl_base;
+ bool updmsr = false;
+
+- /* If TIF_SSBD is different, select the proper mitigation method */
+- if ((tifp ^ tifn) & _TIF_SSBD) {
++ /*
++ * If TIF_SSBD is different, select the proper mitigation
++	 * method. Note that if SSBD mitigation is disabled or permanently
++ * enabled this branch can't be taken because nothing can set
++ * TIF_SSBD.
++ */
++ if (tif_diff & _TIF_SSBD) {
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ amd_set_ssb_virt_state(tifn);
+ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+@@ -423,6 +429,16 @@ static __always_inline void __speculatio
+ }
+ }
+
++ /*
++ * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
++ * otherwise avoid the MSR write.
++ */
++ if (IS_ENABLED(CONFIG_SMP) &&
++ static_branch_unlikely(&switch_to_cond_stibp)) {
++ updmsr |= !!(tif_diff & _TIF_SPEC_IB);
++ msr |= stibp_tif_to_spec_ctrl(tifn);
++ }
++
+ if (updmsr)
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ }
--- /dev/null
+Subject: x86/speculation: Prevent stale SPEC_CTRL msr content
+From: Thomas Gleixner tglx@linutronix.de
+Date: Wed Nov 28 10:56:57 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 6d991ba509ebcfcc908e009d1db51972a4f7a064 upstream
+
+The seccomp speculation control operates on all tasks of a process, but
+only the current task of a process can update the MSR immediately. For the
+other threads the update is deferred to the next context switch.
+
+This creates the following situation with Process A and B:
+
+Process A task 2 and Process B task 1 are pinned on CPU1. Process A task 2
+does not have the speculation control TIF bit set. Process B task 1 has the
+speculation control TIF bit set.
+
+CPU0 CPU1
+ MSR bit is set
+ ProcB.T1 schedules out
+ ProcA.T2 schedules in
+ MSR bit is cleared
+ProcA.T1
+ seccomp_update()
+ set TIF bit on ProcA.T2
+ ProcB.T1 schedules in
+ MSR is not updated <-- FAIL
+
+This happens because the context switch code tries to avoid the MSR update
+if the speculation control TIF bits of the incoming and the outgoing task
+are the same. In the worst case ProcB.T1 and ProcA.T2 are the only tasks
+scheduling back and forth on CPU1, which keeps the MSR stale forever.
+
+In theory this could be remedied by IPIs, but chasing the remote task which
+could be migrated is complex and full of races.
+
+The straightforward solution is to avoid the asynchronous update of the TIF
+bit and defer it to the next context switch. The speculation control state
+is stored in task_struct::atomic_flags by the prctl and seccomp updates
+already.
+
+Add a new TIF_SPEC_FORCE_UPDATE bit and set this after updating the
+atomic_flags. Check the bit on context switch and force a synchronous
+update of the speculation control if set. Use the same mechanism for
+updating the current task.
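+
+Condensed, the context switch side of this change looks as follows (a sketch
+mirroring the process.c hunks below):
+
+    if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
+        __speculation_ctrl_update(tifp, tifn);
+    } else {
+        /* Fold the deferred state into the real TIF bits ... */
+        speculation_ctrl_update_tif(prev_p);
+        tifn = speculation_ctrl_update_tif(next_p);
+
+        /* ... and force the MSR update to ensure a consistent state. */
+        __speculation_ctrl_update(~tifn, tifn);
+    }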
+
+Reported-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1811272247140.1875@nanos.tec.linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 6 +-----
+ arch/x86/include/asm/thread_info.h | 4 +++-
+ arch/x86/kernel/cpu/bugs.c | 18 +++++++-----------
+ arch/x86/kernel/process.c | 30 +++++++++++++++++++++++++++++-
+ 4 files changed, 40 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -83,10 +83,6 @@ static inline void speculative_store_byp
+ #endif
+
+ extern void speculation_ctrl_update(unsigned long tif);
+-
+-static inline void speculation_ctrl_update_current(void)
+-{
+- speculation_ctrl_update(current_thread_info()->flags);
+-}
++extern void speculation_ctrl_update_current(void);
+
+ #endif
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -86,6 +86,7 @@ struct thread_info {
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
+ #define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
++#define TIF_SPEC_FORCE_UPDATE 10 /* Force speculation MSR update in context switch */
+ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+ #define TIF_UPROBE 12 /* breakpointed or singlestepping */
+ #define TIF_PATCH_PENDING 13 /* pending live patching update */
+@@ -114,6 +115,7 @@ struct thread_info {
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+ #define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
++#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
+ #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
+@@ -151,7 +153,7 @@ struct thread_info {
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW_BASE \
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
+- _TIF_SSBD)
++ _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
+
+ /*
+ * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -699,14 +699,10 @@ static void ssb_select_mitigation(void)
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Speculation prctl: " fmt
+
+-static void task_update_spec_tif(struct task_struct *tsk, int tifbit, bool on)
++static void task_update_spec_tif(struct task_struct *tsk)
+ {
+- bool update;
+-
+- if (on)
+- update = !test_and_set_tsk_thread_flag(tsk, tifbit);
+- else
+- update = test_and_clear_tsk_thread_flag(tsk, tifbit);
++ /* Force the update of the real TIF bits */
++ set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
+
+ /*
+ * Immediately update the speculation control MSRs for the current
+@@ -716,7 +712,7 @@ static void task_update_spec_tif(struct
+ * This can only happen for SECCOMP mitigation. For PRCTL it's
+ * always the current task.
+ */
+- if (tsk == current && update)
++ if (tsk == current)
+ speculation_ctrl_update_current();
+ }
+
+@@ -732,16 +728,16 @@ static int ssb_prctl_set(struct task_str
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+ task_clear_spec_ssb_disable(task);
+- task_update_spec_tif(task, TIF_SSBD, false);
++ task_update_spec_tif(task);
+ break;
+ case PR_SPEC_DISABLE:
+ task_set_spec_ssb_disable(task);
+- task_update_spec_tif(task, TIF_SSBD, true);
++ task_update_spec_tif(task);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_force_disable(task);
+- task_update_spec_tif(task, TIF_SSBD, true);
++ task_update_spec_tif(task);
+ break;
+ default:
+ return -ERANGE;
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -446,6 +446,18 @@ static __always_inline void __speculatio
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ }
+
++static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
++{
++ if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
++ if (task_spec_ssb_disable(tsk))
++ set_tsk_thread_flag(tsk, TIF_SSBD);
++ else
++ clear_tsk_thread_flag(tsk, TIF_SSBD);
++ }
++	/* Return the updated threadinfo flags */
++ return task_thread_info(tsk)->flags;
++}
++
+ void speculation_ctrl_update(unsigned long tif)
+ {
+ /* Forced update. Make sure all relevant TIF flags are different */
+@@ -454,6 +466,14 @@ void speculation_ctrl_update(unsigned lo
+ preempt_enable();
+ }
+
++/* Called from seccomp/prctl update */
++void speculation_ctrl_update_current(void)
++{
++ preempt_disable();
++ speculation_ctrl_update(speculation_ctrl_update_tif(current));
++ preempt_enable();
++}
++
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
+ {
+ struct thread_struct *prev, *next;
+@@ -485,7 +505,15 @@ void __switch_to_xtra(struct task_struct
+ if ((tifp ^ tifn) & _TIF_NOCPUID)
+ set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+- __speculation_ctrl_update(tifp, tifn);
++ if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
++ __speculation_ctrl_update(tifp, tifn);
++ } else {
++ speculation_ctrl_update_tif(prev_p);
++ tifn = speculation_ctrl_update_tif(next_p);
++
++ /* Enforce MSR update to ensure consistent state */
++ __speculation_ctrl_update(~tifn, tifn);
++ }
+ }
+
+ /*
--- /dev/null
+Subject: x86/speculation: Propagate information about RSB filling mitigation to sysfs
+From: Jiri Kosina jkosina@suse.cz
+Date: Tue Sep 25 14:39:28 2018 +0200
+
+From: Jiri Kosina jkosina@suse.cz
+
+commit bb4b3b7762735cdaba5a40fd94c9303d9ffa147a upstream
+
+If the spectrev2 mitigation has been enabled, the RSB is filled on context
+switch in order to protect against various classes of spectrev2 attacks.
+
+If this mitigation is enabled, say so in sysfs for spectrev2.
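+
+With retpoline and the other mitigations active, the sysfs file then reports
+for example (illustrative output):
+
+    $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
+    Mitigation: Full generic retpoline, IBPB, IBRS_FW, STIBP, RSB filling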
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "WoodhouseDavid" <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: "SchauflerCasey" <casey.schaufler@intel.com>
+Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251438580.15880@cbobk.fhfr.pm
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -870,10 +870,11 @@ static ssize_t cpu_show_common(struct de
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
+ case X86_BUG_SPECTRE_V2:
+- ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
++ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+ spectre_v2_module_string());
+ return ret;
+
--- /dev/null
+Subject: x86/speculation: Provide IBPB always command line options
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:56 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 55a974021ec952ee460dc31ca08722158639de72 upstream
+
+Provide the possibility to enable IBPB always in combination with 'prctl'
+and 'seccomp'.
+
+Add the extra command line options and rework the IBPB selection to
+evaluate the command instead of the mode selected by the STIBP switch case.
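+
+For example, booting with the following (illustrative) option selects
+per-task STIBP control via seccomp while IBPB is issued unconditionally when
+switching between different user space processes:
+
+    spectre_v2_user=seccomp,ibpb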
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185006.144047038@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 12 ++++++++
+ arch/x86/kernel/cpu/bugs.c | 34 ++++++++++++++++--------
+ 2 files changed, 35 insertions(+), 11 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4041,11 +4041,23 @@
+ per thread. The mitigation control state
+ is inherited on fork.
+
++ prctl,ibpb
++ - Like "prctl" above, but only STIBP is
++ controlled per thread. IBPB is issued
++ always when switching between different user
++ space processes.
++
+ seccomp
+ - Same as "prctl" above, but all seccomp
+ threads will enable the mitigation unless
+ they explicitly opt out.
+
++ seccomp,ibpb
++ - Like "seccomp" above, but only STIBP is
++ controlled per thread. IBPB is issued
++ always when switching between different
++ user space processes.
++
+ auto - Kernel selects the mitigation depending on
+ the available CPU features and vulnerability.
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -255,7 +255,9 @@ enum spectre_v2_user_cmd {
+ SPECTRE_V2_USER_CMD_AUTO,
+ SPECTRE_V2_USER_CMD_FORCE,
+ SPECTRE_V2_USER_CMD_PRCTL,
++ SPECTRE_V2_USER_CMD_PRCTL_IBPB,
+ SPECTRE_V2_USER_CMD_SECCOMP,
++ SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
+ };
+
+ static const char * const spectre_v2_user_strings[] = {
+@@ -270,11 +272,13 @@ static const struct {
+ enum spectre_v2_user_cmd cmd;
+ bool secure;
+ } v2_user_options[] __initdata = {
+- { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
+- { "off", SPECTRE_V2_USER_CMD_NONE, false },
+- { "on", SPECTRE_V2_USER_CMD_FORCE, true },
+- { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
+- { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
++ { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
++ { "off", SPECTRE_V2_USER_CMD_NONE, false },
++ { "on", SPECTRE_V2_USER_CMD_FORCE, true },
++ { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
++ { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
++ { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
++ { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
+ };
+
+ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
+@@ -320,6 +324,7 @@ spectre_v2_user_select_mitigation(enum s
+ {
+ enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
+ bool smt_possible = IS_ENABLED(CONFIG_SMP);
++ enum spectre_v2_user_cmd cmd;
+
+ if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
+ return;
+@@ -328,17 +333,20 @@ spectre_v2_user_select_mitigation(enum s
+ cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+ smt_possible = false;
+
+- switch (spectre_v2_parse_user_cmdline(v2_cmd)) {
++ cmd = spectre_v2_parse_user_cmdline(v2_cmd);
++ switch (cmd) {
+ case SPECTRE_V2_USER_CMD_NONE:
+ goto set_mode;
+ case SPECTRE_V2_USER_CMD_FORCE:
+ mode = SPECTRE_V2_USER_STRICT;
+ break;
+ case SPECTRE_V2_USER_CMD_PRCTL:
++ case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+ mode = SPECTRE_V2_USER_PRCTL;
+ break;
+ case SPECTRE_V2_USER_CMD_AUTO:
+ case SPECTRE_V2_USER_CMD_SECCOMP:
++ case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ if (IS_ENABLED(CONFIG_SECCOMP))
+ mode = SPECTRE_V2_USER_SECCOMP;
+ else
+@@ -350,12 +358,15 @@ spectre_v2_user_select_mitigation(enum s
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+
+- switch (mode) {
+- case SPECTRE_V2_USER_STRICT:
++ switch (cmd) {
++ case SPECTRE_V2_USER_CMD_FORCE:
++ case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
++ case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ static_branch_enable(&switch_mm_always_ibpb);
+ break;
+- case SPECTRE_V2_USER_PRCTL:
+- case SPECTRE_V2_USER_SECCOMP:
++ case SPECTRE_V2_USER_CMD_PRCTL:
++ case SPECTRE_V2_USER_CMD_AUTO:
++ case SPECTRE_V2_USER_CMD_SECCOMP:
+ static_branch_enable(&switch_mm_cond_ibpb);
+ break;
+ default:
+@@ -363,7 +374,8 @@ spectre_v2_user_select_mitigation(enum s
+ }
+
+ pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+- mode == SPECTRE_V2_USER_STRICT ? "always-on" : "conditional");
++ static_key_enabled(&switch_mm_always_ibpb) ?
++ "always-on" : "conditional");
+ }
+
+ /* If enhanced IBRS is enabled no STIPB required */
--- /dev/null
+Subject: x86/speculation: Remove unnecessary ret variable in cpu_show_common()
+From: Tim Chen tim.c.chen@linux.intel.com
+Date: Sun Nov 25 19:33:31 2018 +0100
+
+From: Tim Chen tim.c.chen@linux.intel.com
+
+commit b86bda0426853bfe8a3506c7d2a5b332760ae46b upstream
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185003.783903657@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -844,8 +844,6 @@ static ssize_t l1tf_show_state(char *buf
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
+- int ret;
+-
+ if (!boot_cpu_has_bug(bug))
+ return sprintf(buf, "Not affected\n");
+
+@@ -860,13 +858,12 @@ static ssize_t cpu_show_common(struct de
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
+ case X86_BUG_SPECTRE_V2:
+- ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
+ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+ spectre_v2_module_string());
+- return ret;
+
+ case X86_BUG_SPEC_STORE_BYPASS:
+ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
--- /dev/null
+Subject: x86/speculation: Rename SSBD update functions
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:34 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 26c4d75b234040c11728a8acb796b3a85ba7507c upstream
+
+During context switch, the SSBD bit in SPEC_CTRL MSR is updated according
+to changes of the TIF_SSBD flag in the current and next running task.
+
+Currently, only the bit controlling speculative store bypass disable in
+SPEC_CTRL MSR is updated and the related update functions all have
+"speculative_store" or "ssb" in their names.
+
+For enhanced mitigation control other bits in SPEC_CTRL MSR need to be
+updated as well, which makes the SSB names inadequate.
+
+Rename the "speculative_store*" functions to a more generic name. No
+functional change.
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.058866968@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/spec-ctrl.h | 6 +++---
+ arch/x86/kernel/cpu/bugs.c | 4 ++--
+ arch/x86/kernel/process.c | 12 ++++++------
+ 3 files changed, 11 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -70,11 +70,11 @@ extern void speculative_store_bypass_ht_
+ static inline void speculative_store_bypass_ht_init(void) { }
+ #endif
+
+-extern void speculative_store_bypass_update(unsigned long tif);
++extern void speculation_ctrl_update(unsigned long tif);
+
+-static inline void speculative_store_bypass_update_current(void)
++static inline void speculation_ctrl_update_current(void)
+ {
+- speculative_store_bypass_update(current_thread_info()->flags);
++ speculation_ctrl_update(current_thread_info()->flags);
+ }
+
+ #endif
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -199,7 +199,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
+ tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+ ssbd_spec_ctrl_to_tif(hostval);
+
+- speculative_store_bypass_update(tif);
++ speculation_ctrl_update(tif);
+ }
+ }
+ EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+@@ -629,7 +629,7 @@ static int ssb_prctl_set(struct task_str
+ * mitigation until it is next scheduled.
+ */
+ if (task == current && update)
+- speculative_store_bypass_update_current();
++ speculation_ctrl_update_current();
+
+ return 0;
+ }
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -398,27 +398,27 @@ static __always_inline void amd_set_ssb_
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+ }
+
+-static __always_inline void intel_set_ssb_state(unsigned long tifn)
++static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
+ {
+ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ }
+
+-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
++static __always_inline void __speculation_ctrl_update(unsigned long tifn)
+ {
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+ amd_set_ssb_virt_state(tifn);
+ else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ amd_set_core_ssb_state(tifn);
+ else
+- intel_set_ssb_state(tifn);
++ spec_ctrl_update_msr(tifn);
+ }
+
+-void speculative_store_bypass_update(unsigned long tif)
++void speculation_ctrl_update(unsigned long tif)
+ {
+ preempt_disable();
+- __speculative_store_bypass_update(tif);
++ __speculation_ctrl_update(tif);
+ preempt_enable();
+ }
+
+@@ -455,7 +455,7 @@ void __switch_to_xtra(struct task_struct
+ set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+ if ((tifp ^ tifn) & _TIF_SSBD)
+- __speculative_store_bypass_update(tifn);
++ __speculation_ctrl_update(tifn);
+ }
+
+ /*
--- /dev/null
+Subject: x86/speculation: Reorder the spec_v2 code
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:41 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 15d6b7aab0793b2de8a05d8a828777dd24db424e upstream
+
+Reorder the code so it is better grouped. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.707122879@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 168 ++++++++++++++++++++++-----------------------
+ 1 file changed, 84 insertions(+), 84 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -123,29 +123,6 @@ void __init check_bugs(void)
+ #endif
+ }
+
+-/* The kernel command line selection */
+-enum spectre_v2_mitigation_cmd {
+- SPECTRE_V2_CMD_NONE,
+- SPECTRE_V2_CMD_AUTO,
+- SPECTRE_V2_CMD_FORCE,
+- SPECTRE_V2_CMD_RETPOLINE,
+- SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+- SPECTRE_V2_CMD_RETPOLINE_AMD,
+-};
+-
+-static const char *spectre_v2_strings[] = {
+- [SPECTRE_V2_NONE] = "Vulnerable",
+- [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
+- [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+- [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
+-};
+-
+-#undef pr_fmt
+-#define pr_fmt(fmt) "Spectre V2 : " fmt
+-
+-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+- SPECTRE_V2_NONE;
+-
+ void
+ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+ {
+@@ -215,6 +192,12 @@ static void x86_amd_ssb_disable(void)
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
++#undef pr_fmt
++#define pr_fmt(fmt) "Spectre V2 : " fmt
++
++static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
++ SPECTRE_V2_NONE;
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+@@ -236,18 +219,6 @@ static inline const char *spectre_v2_mod
+ static inline const char *spectre_v2_module_string(void) { return ""; }
+ #endif
+
+-static void __init spec2_print_if_insecure(const char *reason)
+-{
+- if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+- pr_info("%s selected on command line.\n", reason);
+-}
+-
+-static void __init spec2_print_if_secure(const char *reason)
+-{
+- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+- pr_info("%s selected on command line.\n", reason);
+-}
+-
+ static inline bool match_option(const char *arg, int arglen, const char *opt)
+ {
+ int len = strlen(opt);
+@@ -255,24 +226,53 @@ static inline bool match_option(const ch
+ return len == arglen && !strncmp(arg, opt, len);
+ }
+
++/* The kernel command line selection for spectre v2 */
++enum spectre_v2_mitigation_cmd {
++ SPECTRE_V2_CMD_NONE,
++ SPECTRE_V2_CMD_AUTO,
++ SPECTRE_V2_CMD_FORCE,
++ SPECTRE_V2_CMD_RETPOLINE,
++ SPECTRE_V2_CMD_RETPOLINE_GENERIC,
++ SPECTRE_V2_CMD_RETPOLINE_AMD,
++};
++
++static const char *spectre_v2_strings[] = {
++ [SPECTRE_V2_NONE] = "Vulnerable",
++ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
++ [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
++ [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
++};
++
+ static const struct {
+ const char *option;
+ enum spectre_v2_mitigation_cmd cmd;
+ bool secure;
+ } mitigation_options[] = {
+- { "off", SPECTRE_V2_CMD_NONE, false },
+- { "on", SPECTRE_V2_CMD_FORCE, true },
+- { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
+- { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
+- { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+- { "auto", SPECTRE_V2_CMD_AUTO, false },
++ { "off", SPECTRE_V2_CMD_NONE, false },
++ { "on", SPECTRE_V2_CMD_FORCE, true },
++ { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
++ { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
++ { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
++ { "auto", SPECTRE_V2_CMD_AUTO, false },
+ };
+
++static void __init spec2_print_if_insecure(const char *reason)
++{
++ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++ pr_info("%s selected on command line.\n", reason);
++}
++
++static void __init spec2_print_if_secure(const char *reason)
++{
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++ pr_info("%s selected on command line.\n", reason);
++}
++
+ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+ {
++ enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
+ char arg[20];
+ int ret, i;
+- enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
+
+ if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+ return SPECTRE_V2_CMD_NONE;
+@@ -315,48 +315,6 @@ static enum spectre_v2_mitigation_cmd __
+ return cmd;
+ }
+
+-static bool stibp_needed(void)
+-{
+- if (spectre_v2_enabled == SPECTRE_V2_NONE)
+- return false;
+-
+- /* Enhanced IBRS makes using STIBP unnecessary. */
+- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+- return false;
+-
+- if (!boot_cpu_has(X86_FEATURE_STIBP))
+- return false;
+-
+- return true;
+-}
+-
+-static void update_stibp_msr(void *info)
+-{
+- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+-}
+-
+-void arch_smt_update(void)
+-{
+- u64 mask;
+-
+- if (!stibp_needed())
+- return;
+-
+- mutex_lock(&spec_ctrl_mutex);
+-
+- mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+- if (sched_smt_active())
+- mask |= SPEC_CTRL_STIBP;
+-
+- if (mask != x86_spec_ctrl_base) {
+- pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
+- mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
+- x86_spec_ctrl_base = mask;
+- on_each_cpu(update_stibp_msr, NULL, 1);
+- }
+- mutex_unlock(&spec_ctrl_mutex);
+-}
+-
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -459,6 +417,48 @@ specv2_set_mode:
+ arch_smt_update();
+ }
+
++static bool stibp_needed(void)
++{
++ if (spectre_v2_enabled == SPECTRE_V2_NONE)
++ return false;
++
++ /* Enhanced IBRS makes using STIBP unnecessary. */
++ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++ return false;
++
++ if (!boot_cpu_has(X86_FEATURE_STIBP))
++ return false;
++
++ return true;
++}
++
++static void update_stibp_msr(void *info)
++{
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++}
++
++void arch_smt_update(void)
++{
++ u64 mask;
++
++ if (!stibp_needed())
++ return;
++
++ mutex_lock(&spec_ctrl_mutex);
++
++ mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
++ if (sched_smt_active())
++ mask |= SPEC_CTRL_STIBP;
++
++ if (mask != x86_spec_ctrl_base) {
++ pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
++ mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
++ x86_spec_ctrl_base = mask;
++ on_each_cpu(update_stibp_msr, NULL, 1);
++ }
++ mutex_unlock(&spec_ctrl_mutex);
++}
++
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
+
--- /dev/null
+Subject: x86/speculation: Reorganize speculation control MSRs update
+From: Tim Chen tim.c.chen@linux.intel.com
+Date: Sun Nov 25 19:33:35 2018 +0100
+
+From: Tim Chen tim.c.chen@linux.intel.com
+
+commit 01daf56875ee0cd50ed496a09b20eb369b45dfa5 upstream
+
+The logic to detect whether there is a change between the previous and next
+task's flags relevant to updating the speculation control MSRs is spread
+out across multiple functions.
+
+Consolidate all checks needed for updating speculation control MSRs into
+the new __speculation_ctrl_update() helper function.
+
+This makes it easy to pick the right speculation control MSR and the bits
+in MSR_IA32_SPEC_CTRL that need updating based on TIF flags changes.
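+
+The consolidated helper roughly takes the following shape (a condensed
+sketch of the hunk below):
+
+    static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+                                                          unsigned long tifn)
+    {
+        u64 msr = x86_spec_ctrl_base;
+        bool updmsr = false;
+
+        /* If TIF_SSBD differs, select the proper mitigation method. */
+        if ((tifp ^ tifn) & _TIF_SSBD) {
+            if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+                amd_set_ssb_virt_state(tifn);
+            } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+                amd_set_core_ssb_state(tifn);
+            } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                       static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+                msr |= ssbd_tif_to_spec_ctrl(tifn);
+                updmsr = true;
+            }
+        }
+
+        if (updmsr)
+            wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+    }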
+
+Originally-by: Thomas Lendacky <Thomas.Lendacky@amd.com>
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.151077005@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/process.c | 42 +++++++++++++++++++++++++++---------------
+ 1 file changed, 27 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -398,27 +398,40 @@ static __always_inline void amd_set_ssb_
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+ }
+
+-static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
++/*
++ * Update the MSRs managing speculation control, during context switch.
++ *
++ * tifp: Previous task's thread flags
++ * tifn: Next task's thread flags
++ */
++static __always_inline void __speculation_ctrl_update(unsigned long tifp,
++ unsigned long tifn)
+ {
+- u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
++ u64 msr = x86_spec_ctrl_base;
++ bool updmsr = false;
+
+- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+-}
++ /* If TIF_SSBD is different, select the proper mitigation method */
++ if ((tifp ^ tifn) & _TIF_SSBD) {
++ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
++ amd_set_ssb_virt_state(tifn);
++ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
++ amd_set_core_ssb_state(tifn);
++ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ msr |= ssbd_tif_to_spec_ctrl(tifn);
++ updmsr = true;
++ }
++ }
+
+-static __always_inline void __speculation_ctrl_update(unsigned long tifn)
+-{
+- if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+- amd_set_ssb_virt_state(tifn);
+- else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+- amd_set_core_ssb_state(tifn);
+- else
+- spec_ctrl_update_msr(tifn);
++ if (updmsr)
++ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ }
+
+ void speculation_ctrl_update(unsigned long tif)
+ {
++ /* Forced update. Make sure all relevant TIF flags are different */
+ preempt_disable();
+- __speculation_ctrl_update(tif);
++ __speculation_ctrl_update(~tif, tif);
+ preempt_enable();
+ }
+
+@@ -454,8 +467,7 @@ void __switch_to_xtra(struct task_struct
+ if ((tifp ^ tifn) & _TIF_NOCPUID)
+ set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+- if ((tifp ^ tifn) & _TIF_SSBD)
+- __speculation_ctrl_update(tifn);
++ __speculation_ctrl_update(tifp, tifn);
+ }
+
+ /*
--- /dev/null
+Subject: x86/speculation: Rework SMT state change
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:39 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit a74cfffb03b73d41e08f84c2e5c87dec0ce3db9f upstream
+
+arch_smt_update() is only called when the sysfs SMT control knob is
+changed. This means that when SMT is enabled in the sysfs control knob the
+system is considered to have SMT active even if all siblings are offline.
+
+To allow fine-grained control of the speculation mitigations, the actual SMT
+state is more interesting than the fact that siblings could be enabled.
+
+Rework the code so that arch_smt_update() is invoked from each individual CPU
+hotplug function, and simplify the update function while at it.
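+
+After the rework, the update boils down to deriving STIBP from the actual
+scheduler SMT state (condensed from the bugs.c hunk below):
+
+    mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+    if (sched_smt_active())
+        mask |= SPEC_CTRL_STIBP;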
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.521974984@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 11 +++++------
+ include/linux/sched/smt.h | 2 ++
+ kernel/cpu.c | 15 +++++++++------
+ 3 files changed, 16 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/nospec.h>
+ #include <linux/prctl.h>
++#include <linux/sched/smt.h>
+
+ #include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+@@ -342,16 +343,14 @@ void arch_smt_update(void)
+ return;
+
+ mutex_lock(&spec_ctrl_mutex);
+- mask = x86_spec_ctrl_base;
+- if (cpu_smt_control == CPU_SMT_ENABLED)
++
++ mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
++ if (sched_smt_active())
+ mask |= SPEC_CTRL_STIBP;
+- else
+- mask &= ~SPEC_CTRL_STIBP;
+
+ if (mask != x86_spec_ctrl_base) {
+ pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
+- cpu_smt_control == CPU_SMT_ENABLED ?
+- "Enabling" : "Disabling");
++ mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
+ x86_spec_ctrl_base = mask;
+ on_each_cpu(update_stibp_msr, NULL, 1);
+ }
+--- a/include/linux/sched/smt.h
++++ b/include/linux/sched/smt.h
+@@ -15,4 +15,6 @@ static __always_inline bool sched_smt_ac
+ static inline bool sched_smt_active(void) { return false; }
+ #endif
+
++void arch_smt_update(void);
++
+ #endif
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -10,6 +10,7 @@
+ #include <linux/sched/signal.h>
+ #include <linux/sched/hotplug.h>
+ #include <linux/sched/task.h>
++#include <linux/sched/smt.h>
+ #include <linux/unistd.h>
+ #include <linux/cpu.h>
+ #include <linux/oom.h>
+@@ -347,6 +348,12 @@ void cpu_hotplug_enable(void)
+ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+ #endif /* CONFIG_HOTPLUG_CPU */
+
++/*
++ * Architectures that need SMT-specific errata handling during SMT hotplug
++ * should override this.
++ */
++void __weak arch_smt_update(void) { }
++
+ #ifdef CONFIG_HOTPLUG_SMT
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+ EXPORT_SYMBOL_GPL(cpu_smt_control);
+@@ -998,6 +1005,7 @@ out:
+ * concurrent CPU hotplug via cpu_add_remove_lock.
+ */
+ lockup_detector_cleanup();
++ arch_smt_update();
+ return ret;
+ }
+
+@@ -1126,6 +1134,7 @@ static int _cpu_up(unsigned int cpu, int
+ ret = cpuhp_up_callbacks(cpu, st, target);
+ out:
+ cpus_write_unlock();
++ arch_smt_update();
+ return ret;
+ }
+
+@@ -2045,12 +2054,6 @@ static void cpuhp_online_cpu_device(unsi
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+ }
+
+-/*
+- * Architectures that need SMT-specific errata handling during SMT hotplug
+- * should override this.
+- */
+-void __weak arch_smt_update(void) { };
+-
+ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ {
+ int cpu, ret = 0;
--- /dev/null
+Subject: x86/speculation: Split out TIF update
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:51 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit e6da8bb6f9abb2628381904b24163c770e630bac upstream
+
+The update of the TIF_SSBD flag and the conditional speculation control MSR
+update are done directly in the ssb_prctl_set() function. The upcoming prctl
+support for controlling indirect branch speculation via STIBP needs the same
+mechanism.
+
+Split the code out and make it reusable. Reword the comment about updates
+for other tasks.
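+
+The extracted helper, as added by the hunk below, reads:
+
+	static void task_update_spec_tif(struct task_struct *tsk, int tifbit, bool on)
+	{
+		bool update;
+
+		if (on)
+			update = !test_and_set_tsk_thread_flag(tsk, tifbit);
+		else
+			update = test_and_clear_tsk_thread_flag(tsk, tifbit);
+
+		/* Poke the MSRs only if the flag actually changed on current */
+		if (tsk == current && update)
+			speculation_ctrl_update_current();
+	}
+
+Each case in ssb_prctl_set() then collapses to a single
+task_update_spec_tif(task, TIF_SSBD, ...) call, which is what lets the
+upcoming STIBP prctl reuse the helper unchanged.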
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185005.652305076@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 35 +++++++++++++++++++++++------------
+ 1 file changed, 23 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -699,10 +699,29 @@ static void ssb_select_mitigation(void)
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Speculation prctl: " fmt
+
+-static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
++static void task_update_spec_tif(struct task_struct *tsk, int tifbit, bool on)
+ {
+ bool update;
+
++ if (on)
++ update = !test_and_set_tsk_thread_flag(tsk, tifbit);
++ else
++ update = test_and_clear_tsk_thread_flag(tsk, tifbit);
++
++ /*
++ * Immediately update the speculation control MSRs for the current
++ * task, but for a non-current task delay setting the CPU
++ * mitigation until it is scheduled next.
++ *
++ * This can only happen for SECCOMP mitigation. For PRCTL it's
++ * always the current task.
++ */
++ if (tsk == current && update)
++ speculation_ctrl_update_current();
++}
++
++static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
++{
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
+ ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
+ return -ENXIO;
+@@ -713,28 +732,20 @@ static int ssb_prctl_set(struct task_str
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+ task_clear_spec_ssb_disable(task);
+- update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
++ task_update_spec_tif(task, TIF_SSBD, false);
+ break;
+ case PR_SPEC_DISABLE:
+ task_set_spec_ssb_disable(task);
+- update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
++ task_update_spec_tif(task, TIF_SSBD, true);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_force_disable(task);
+- update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
++ task_update_spec_tif(task, TIF_SSBD, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+-
+- /*
+- * If being set on non-current task, delay setting the CPU
+- * mitigation until it is next scheduled.
+- */
+- if (task == current && update)
+- speculation_ctrl_update_current();
+-
+ return 0;
+ }
+
--- /dev/null
+Subject: x86/speculation: Unify conditional spectre v2 print functions
+From: Thomas Gleixner tglx@linutronix.de
+Date: Sun Nov 25 19:33:44 2018 +0100
+
+From: Thomas Gleixner tglx@linutronix.de
+
+commit 495d470e9828500e0155027f230449ac5e29c025 upstream
+
+There is no point in having two functions and a conditional at the call
+site.
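+
+Folding the pair into one function with a 'secure' flag makes the call site
+unconditional; the unified version (taken from the hunk below) is:
+
+	static void __init spec_v2_print_cond(const char *reason, bool secure)
+	{
+		/* Print when the CPU's bug state disagrees with the option */
+		if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+			pr_info("%s selected on command line.\n", reason);
+	}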
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.986890749@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 17 ++++-------------
+ 1 file changed, 4 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -256,15 +256,9 @@ static const struct {
+ { "auto", SPECTRE_V2_CMD_AUTO, false },
+ };
+
+-static void __init spec2_print_if_insecure(const char *reason)
++static void __init spec_v2_print_cond(const char *reason, bool secure)
+ {
+- if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+- pr_info("%s selected on command line.\n", reason);
+-}
+-
+-static void __init spec2_print_if_secure(const char *reason)
+-{
+- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+ pr_info("%s selected on command line.\n", reason);
+ }
+
+@@ -307,11 +301,8 @@ static enum spectre_v2_mitigation_cmd __
+ return SPECTRE_V2_CMD_AUTO;
+ }
+
+- if (mitigation_options[i].secure)
+- spec2_print_if_secure(mitigation_options[i].option);
+- else
+- spec2_print_if_insecure(mitigation_options[i].option);
+-
++ spec_v2_print_cond(mitigation_options[i].option,
++ mitigation_options[i].secure);
+ return cmd;
+ }
+
--- /dev/null
+Subject: x86/speculation: Update the TIF_SSBD comment
+From: Tim Chen tim.c.chen@linux.intel.com
+Date: Sun Nov 25 19:33:29 2018 +0100
+
+From: Tim Chen tim.c.chen@linux.intel.com
+
+commit 8eb729b77faf83ac4c1f363a9ad68d042415f24c upstream
+
+"Reduced Data Speculation" is an obsolete term. The correct new name is
+"Speculative store bypass disable" - which is abbreviated into SSBD.
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185003.593893901@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/thread_info.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -81,7 +81,7 @@ struct thread_info {
+ #define TIF_SIGPENDING 2 /* signal pending */
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+-#define TIF_SSBD 5 /* Reduced data speculation */
++#define TIF_SSBD 5 /* Speculative store bypass disable */
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */