Merge commit 'its-for-linus-20250509-merge' into x86/core, to resolve conflicts
author Ingo Molnar <mingo@kernel.org>
Tue, 13 May 2025 08:47:10 +0000 (10:47 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 13 May 2025 08:47:10 +0000 (10:47 +0200)
 Conflicts:
Documentation/admin-guide/hw-vuln/index.rst
arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
drivers/base/cpu.c
include/linux/cpu.h

Signed-off-by: Ingo Molnar <mingo@kernel.org>
22 files changed:
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/hw-vuln/index.rst
arch/x86/Kconfig
arch/x86/include/asm/alternative.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/module.c
arch/x86/kernel/static_call.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/x86.c
arch/x86/lib/retpoline.S
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/net/bpf_jit_comp.c
drivers/base/cpu.c
include/linux/cpu.h
tools/objtool/arch/x86/decode.c

index cf1511145927901d1a0f57ea714119f21d82d382,ce296b8430fc987358329d7697cd1990e4d10892..09890a8f3ee906f891866949b710fb2d8404d139
@@@ -23,4 -23,4 +23,5 @@@ are configurable at compile, boot or ru
     gather_data_sampling
     reg-file-data-sampling
     rsb
 +   old_microcode
+    indirect-target-selection
Simple merge
Simple merge
index 7642310276a8fdf905261534262d2ca8078a2d6c,39e61212ac9a916dbb2ceb1b0b95e8d71303c547..f67a93fc9391dec7d5defd9bb4b1cb622ec222e5
  #define X86_FEATURE_CLEAR_BHB_LOOP    (21*32+ 1) /* Clear branch history at syscall entry using SW loop */
  #define X86_FEATURE_BHI_CTRL          (21*32+ 2) /* BHI_DIS_S HW control available */
  #define X86_FEATURE_CLEAR_BHB_HW      (21*32+ 3) /* BHI_DIS_S HW control enabled */
 -#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
 -#define X86_FEATURE_AMD_FAST_CPPC     (21*32 + 5) /* Fast CPPC */
 -#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
 -#define X86_FEATURE_AMD_WORKLOAD_CLASS        (21*32 + 7) /* Workload Classification */
 -#define X86_FEATURE_PREFER_YMM                (21*32 + 8) /* Avoid ZMM registers due to downclocking */
 -#define X86_FEATURE_INDIRECT_THUNK_ITS        (21*32 + 9) /* Use thunk for indirect branches in lower half of cacheline */
 +#define X86_FEATURE_CLEAR_BHB_VMEXIT  (21*32+ 4) /* Clear branch history at vmexit using SW loop */
 +#define X86_FEATURE_AMD_FAST_CPPC     (21*32+ 5) /* Fast CPPC */
 +#define X86_FEATURE_AMD_HTR_CORES     (21*32+ 6) /* Heterogeneous Core Topology */
 +#define X86_FEATURE_AMD_WORKLOAD_CLASS        (21*32+ 7) /* Workload Classification */
 +#define X86_FEATURE_PREFER_YMM                (21*32+ 8) /* Avoid ZMM registers due to downclocking */
 +#define X86_FEATURE_APX                       (21*32+ 9) /* Advanced Performance Extensions */
++#define X86_FEATURE_INDIRECT_THUNK_ITS        (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */
  
  /*
   * BUG word(s)
  #define X86_BUG_TDX_PW_MCE            X86_BUG(31) /* "tdx_pw_mce" CPU may incur #MC if non-TD software does partial write to TDX private memory */
  
  /* BUG word 2 */
 -#define X86_BUG_SRSO                  X86_BUG(1*32 + 0) /* "srso" AMD SRSO bug */
 -#define X86_BUG_DIV0                  X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
 -#define X86_BUG_RFDS                  X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
 -#define X86_BUG_BHI                   X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
 -#define X86_BUG_IBPB_NO_RET           X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
 -#define X86_BUG_SPECTRE_V2_USER               X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
 -#define X86_BUG_ITS                   X86_BUG(1*32 + 6) /* "its" CPU is affected by Indirect Target Selection */
 -#define X86_BUG_ITS_NATIVE_ONLY               X86_BUG(1*32 + 7) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
 +#define X86_BUG_SRSO                  X86_BUG( 1*32+ 0) /* "srso" AMD SRSO bug */
 +#define X86_BUG_DIV0                  X86_BUG( 1*32+ 1) /* "div0" AMD DIV0 speculation bug */
 +#define X86_BUG_RFDS                  X86_BUG( 1*32+ 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
 +#define X86_BUG_BHI                   X86_BUG( 1*32+ 3) /* "bhi" CPU is affected by Branch History Injection */
 +#define X86_BUG_IBPB_NO_RET           X86_BUG( 1*32+ 4) /* "ibpb_no_ret" IBPB omits return target predictions */
 +#define X86_BUG_SPECTRE_V2_USER               X86_BUG( 1*32+ 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
 +#define X86_BUG_OLD_MICROCODE         X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
++#define X86_BUG_ITS                   X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
++#define X86_BUG_ITS_NATIVE_ONLY               X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
 +
  #endif /* _ASM_X86_CPUFEATURES_H */
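Note on the renumbering above: the (word*32 + bit) values index a flat array of 32-bit capability words, with word 21 being a Linux-defined auxiliary word, so the resolution moves X86_FEATURE_INDIRECT_THUNK_ITS to the next free bit (21*32+10) after X86_FEATURE_APX and likewise shifts the ITS bug bits past X86_BUG_OLD_MICROCODE. A standalone sketch of the index math (illustrative only; NCAPINTS of 22 is an assumption here, and the kernel's real check goes through cpu_has()/test_bit(), not this helper):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define NCAPINTS 22                          /* assumed: capability words 0..21 */
#define FEATURE(word, bit) ((word) * 32 + (bit))

/* Check one flag in a flat bitmap -- the same math the macros above encode. */
static bool has_feature(const uint32_t caps[NCAPINTS], unsigned int f)
{
	return caps[f / 32] & (1u << (f % 32));  /* word index, bit within word */
}

int main(void)
{
	uint32_t caps[NCAPINTS] = { 0 };
	unsigned int its_thunk = FEATURE(21, 10);   /* mirrors (21*32+10) above */

	caps[its_thunk / 32] |= 1u << (its_thunk % 32);
	printf("%d\n", has_feature(caps, its_thunk));  /* prints 1 */
	return 0;
}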
Simple merge
Simple merge
index ddbc303e41e367afe59c5d159c748c541060a778,48fd04e9011483cfa180197e0bbaed3c46455637..2385528792b213e3f05aa5af6ab99470ecfea2d8
@@@ -1,14 -1,39 +1,17 @@@
  // SPDX-License-Identifier: GPL-2.0-only
  #define pr_fmt(fmt) "SMP alternatives: " fmt
  
 -#include <linux/module.h>
 -#include <linux/sched.h>
 +#include <linux/mmu_context.h>
  #include <linux/perf_event.h>
 -#include <linux/mutex.h>
 -#include <linux/list.h>
 -#include <linux/stringify.h>
 -#include <linux/highmem.h>
 -#include <linux/mm.h>
  #include <linux/vmalloc.h>
  #include <linux/memory.h>
 -#include <linux/stop_machine.h>
 -#include <linux/slab.h>
 -#include <linux/kdebug.h>
 -#include <linux/kprobes.h>
 -#include <linux/mmu_context.h>
 -#include <linux/bsearch.h>
 -#include <linux/sync_core.h>
+ #include <linux/execmem.h>
 +
  #include <asm/text-patching.h>
 -#include <asm/alternative.h>
 -#include <asm/sections.h>
 -#include <asm/mce.h>
 -#include <asm/nmi.h>
 -#include <asm/cacheflush.h>
 -#include <asm/tlbflush.h>
  #include <asm/insn.h>
 -#include <asm/io.h>
 -#include <asm/fixmap.h>
 -#include <asm/paravirt.h>
 -#include <asm/asm-prototypes.h>
 -#include <asm/cfi.h>
+ #include <asm/ibt.h>
+ #include <asm/set_memory.h>
 +#include <asm/nmi.h>
  
  int __read_mostly alternatives_patched;
  
index a938fb4add658b98f9d2847ee962be19b0d9dc69,8596ce85026c0dd8aeb79871350f71a1e2c9b113..47c74c4ae2064c45b7e841428976c989fdadbec1
  
  #include "cpu.h"
  
 +/*
 + * Speculation Vulnerability Handling
 + *
 + * Each vulnerability is handled with the following functions:
 + *   <vuln>_select_mitigation() -- Selects a mitigation to use.  This should
 + *                               take into account all relevant command line
 + *                               options.
 + *   <vuln>_update_mitigation() -- This is called after all vulnerabilities have
 + *                               selected a mitigation, in case the selection
 + *                               may want to change based on other choices
 + *                               made.  This function is optional.
 + *   <vuln>_apply_mitigation() -- Enable the selected mitigation.
 + *
 + * The compile-time mitigation in all cases should be AUTO.  An explicit
 + * command-line option can override AUTO.  If no such option is
 + * provided, <vuln>_select_mitigation() will override AUTO to the best
 + * mitigation option.
 + */
 +
  static void __init spectre_v1_select_mitigation(void);
 +static void __init spectre_v1_apply_mitigation(void);
  static void __init spectre_v2_select_mitigation(void);
 +static void __init spectre_v2_update_mitigation(void);
 +static void __init spectre_v2_apply_mitigation(void);
  static void __init retbleed_select_mitigation(void);
 +static void __init retbleed_update_mitigation(void);
 +static void __init retbleed_apply_mitigation(void);
  static void __init spectre_v2_user_select_mitigation(void);
 +static void __init spectre_v2_user_update_mitigation(void);
 +static void __init spectre_v2_user_apply_mitigation(void);
  static void __init ssb_select_mitigation(void);
 +static void __init ssb_apply_mitigation(void);
  static void __init l1tf_select_mitigation(void);
 +static void __init l1tf_apply_mitigation(void);
  static void __init mds_select_mitigation(void);
 -static void __init md_clear_update_mitigation(void);
 -static void __init md_clear_select_mitigation(void);
 +static void __init mds_update_mitigation(void);
 +static void __init mds_apply_mitigation(void);
  static void __init taa_select_mitigation(void);
 +static void __init taa_update_mitigation(void);
 +static void __init taa_apply_mitigation(void);
  static void __init mmio_select_mitigation(void);
 +static void __init mmio_update_mitigation(void);
 +static void __init mmio_apply_mitigation(void);
 +static void __init rfds_select_mitigation(void);
 +static void __init rfds_update_mitigation(void);
 +static void __init rfds_apply_mitigation(void);
  static void __init srbds_select_mitigation(void);
 +static void __init srbds_apply_mitigation(void);
  static void __init l1d_flush_select_mitigation(void);
  static void __init srso_select_mitigation(void);
 +static void __init srso_update_mitigation(void);
 +static void __init srso_apply_mitigation(void);
  static void __init gds_select_mitigation(void);
 +static void __init gds_apply_mitigation(void);
 +static void __init bhi_select_mitigation(void);
 +static void __init bhi_update_mitigation(void);
 +static void __init bhi_apply_mitigation(void);
+ static void __init its_select_mitigation(void);
  
  /* The base value of the SPEC_CTRL MSR without task-specific bits set */
  u64 x86_spec_ctrl_base;
@@@ -204,56 -172,22 +213,57 @@@ void __init cpu_select_mitigations(void
        spectre_v2_user_select_mitigation();
        ssb_select_mitigation();
        l1tf_select_mitigation();
 -      md_clear_select_mitigation();
 +      mds_select_mitigation();
 +      taa_select_mitigation();
 +      mmio_select_mitigation();
 +      rfds_select_mitigation();
        srbds_select_mitigation();
        l1d_flush_select_mitigation();
 -
 -      /*
 -       * srso_select_mitigation() depends and must run after
 -       * retbleed_select_mitigation().
 -       */
        srso_select_mitigation();
        gds_select_mitigation();
+       its_select_mitigation();
 +      bhi_select_mitigation();
 +
 +      /*
 +       * After mitigations are selected, some may need to update their
 +       * choices.
 +       */
 +      spectre_v2_update_mitigation();
 +      /*
 +       * retbleed_update_mitigation() relies on the state set by
 +       * spectre_v2_update_mitigation(); specifically it wants to know about
 +       * spectre_v2=ibrs.
 +       */
 +      retbleed_update_mitigation();
 +
 +      /*
 +       * spectre_v2_user_update_mitigation() depends on
 +       * retbleed_update_mitigation(), specifically the STIBP
 +       * selection is forced for UNRET or IBPB.
 +       */
 +      spectre_v2_user_update_mitigation();
 +      mds_update_mitigation();
 +      taa_update_mitigation();
 +      mmio_update_mitigation();
 +      rfds_update_mitigation();
 +      bhi_update_mitigation();
 +      /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
 +      srso_update_mitigation();
 +
 +      spectre_v1_apply_mitigation();
 +      spectre_v2_apply_mitigation();
 +      retbleed_apply_mitigation();
 +      spectre_v2_user_apply_mitigation();
 +      ssb_apply_mitigation();
 +      l1tf_apply_mitigation();
 +      mds_apply_mitigation();
 +      taa_apply_mitigation();
 +      mmio_apply_mitigation();
 +      rfds_apply_mitigation();
 +      srbds_apply_mitigation();
 +      srso_apply_mitigation();
 +      gds_apply_mitigation();
 +      bhi_apply_mitigation();
  }
  
  /*
@@@ -1323,8 -1173,169 +1333,147 @@@ static void __init retbleed_apply_mitig
        if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
            (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
                cpu_smt_disable(false);
 -
 -      /*
 -       * Let IBRS trump all on Intel without affecting the effects of the
 -       * retbleed= cmdline option except for call depth based stuffing
 -       */
 -      if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
 -              switch (spectre_v2_enabled) {
 -              case SPECTRE_V2_IBRS:
 -                      retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
 -                      break;
 -              case SPECTRE_V2_EIBRS:
 -              case SPECTRE_V2_EIBRS_RETPOLINE:
 -              case SPECTRE_V2_EIBRS_LFENCE:
 -                      retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
 -                      break;
 -              default:
 -                      if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
 -                              pr_err(RETBLEED_INTEL_MSG);
 -              }
 -      }
 -
 -      pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
  }
  
+ #undef pr_fmt
+ #define pr_fmt(fmt)     "ITS: " fmt
+
+ enum its_mitigation_cmd {
+       ITS_CMD_OFF,
+       ITS_CMD_ON,
+       ITS_CMD_VMEXIT,
+       ITS_CMD_RSB_STUFF,
+ };
+
+ enum its_mitigation {
+       ITS_MITIGATION_OFF,
+       ITS_MITIGATION_VMEXIT_ONLY,
+       ITS_MITIGATION_ALIGNED_THUNKS,
+       ITS_MITIGATION_RETPOLINE_STUFF,
+ };
+
+ static const char * const its_strings[] = {
+       [ITS_MITIGATION_OFF]                    = "Vulnerable",
+       [ITS_MITIGATION_VMEXIT_ONLY]            = "Mitigation: Vulnerable, KVM: Not affected",
+       [ITS_MITIGATION_ALIGNED_THUNKS]         = "Mitigation: Aligned branch/return thunks",
+       [ITS_MITIGATION_RETPOLINE_STUFF]        = "Mitigation: Retpolines, Stuffing RSB",
+ };
+
+ static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
+
+ static enum its_mitigation_cmd its_cmd __ro_after_init =
+       IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
+
+ static int __init its_parse_cmdline(char *str)
+ {
+       if (!str)
+               return -EINVAL;
+
+       if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
+               pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
+               return 0;
+       }
+
+       if (!strcmp(str, "off")) {
+               its_cmd = ITS_CMD_OFF;
+       } else if (!strcmp(str, "on")) {
+               its_cmd = ITS_CMD_ON;
+       } else if (!strcmp(str, "force")) {
+               its_cmd = ITS_CMD_ON;
+               setup_force_cpu_bug(X86_BUG_ITS);
+       } else if (!strcmp(str, "vmexit")) {
+               its_cmd = ITS_CMD_VMEXIT;
+       } else if (!strcmp(str, "stuff")) {
+               its_cmd = ITS_CMD_RSB_STUFF;
+       } else {
+               pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
+       }
+
+       return 0;
+ }
+ early_param("indirect_target_selection", its_parse_cmdline);
+
+ static void __init its_select_mitigation(void)
+ {
+       enum its_mitigation_cmd cmd = its_cmd;
+
+       if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
+               its_mitigation = ITS_MITIGATION_OFF;
+               return;
+       }
+
+       /* Retpoline+CDT mitigates ITS, bail out */
+       if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+           boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
+               its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+               goto out;
+       }
+
+       /* Exit early to avoid irrelevant warnings */
+       if (cmd == ITS_CMD_OFF) {
+               its_mitigation = ITS_MITIGATION_OFF;
+               goto out;
+       }
+
+       if (spectre_v2_enabled == SPECTRE_V2_NONE) {
+               pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
+               its_mitigation = ITS_MITIGATION_OFF;
+               goto out;
+       }
+
+       if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
+           !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
+               pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
+               its_mitigation = ITS_MITIGATION_OFF;
+               goto out;
+       }
+
+       if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
+               pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
+               its_mitigation = ITS_MITIGATION_OFF;
+               goto out;
+       }
+
+       if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
+               pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
+               its_mitigation = ITS_MITIGATION_OFF;
+               goto out;
+       }
+
+       if (cmd == ITS_CMD_RSB_STUFF &&
+           (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))) {
+               pr_err("RSB stuff mitigation not supported, using default\n");
+               cmd = ITS_CMD_ON;
+       }
+
+       switch (cmd) {
+       case ITS_CMD_OFF:
+               its_mitigation = ITS_MITIGATION_OFF;
+               break;
+       case ITS_CMD_VMEXIT:
+               if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
+                       its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
+                       goto out;
+               }
+               fallthrough;
+       case ITS_CMD_ON:
+               its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+               if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
+                       setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
+               setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+               set_return_thunk(its_return_thunk);
+               break;
+       case ITS_CMD_RSB_STUFF:
+               its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+               setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+               setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
+               set_return_thunk(call_depth_return_thunk);
+               if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
+                       retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
+                       pr_info("Retbleed mitigation updated to stuffing\n");
+               }
+               break;
+       }
+
+ out:
+       pr_info("%s\n", its_strings[its_mitigation]);
+ }
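Two consequences of the flow above, as implemented: indirect_target_selection=vmexit only yields "Mitigation: Vulnerable, KVM: Not affected" on CPUs flagged X86_BUG_ITS_NATIVE_ONLY and otherwise falls through to the full aligned-thunks mitigation, and =stuff quietly degrades to ITS_CMD_ON (after the "RSB stuff mitigation not supported" message) when retpolines or CONFIG_MITIGATION_CALL_DEPTH_TRACKING are unavailable.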
  #undef pr_fmt
  #define pr_fmt(fmt)     "Spectre V2 : " fmt
  
@@@ -2806,52 -2833,8 +2955,52 @@@ static void __init srso_apply_mitigatio
        if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
                setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
  
 -      if (srso_mitigation != SRSO_MITIGATION_NONE)
 -              pr_info("%s\n", srso_strings[srso_mitigation]);
 +      if (srso_mitigation == SRSO_MITIGATION_NONE) {
 +              if (boot_cpu_has(X86_FEATURE_SBPB))
 +                      x86_pred_cmd = PRED_CMD_SBPB;
 +              return;
 +      }
 +
 +      switch (srso_mitigation) {
 +      case SRSO_MITIGATION_SAFE_RET:
 +      case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
 +              /*
 +               * Enable the return thunk for generated code
 +               * like ftrace, static_call, etc.
 +               */
 +              setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 +              setup_force_cpu_cap(X86_FEATURE_UNRET);
 +
 +              if (boot_cpu_data.x86 == 0x19) {
 +                      setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
-                       x86_return_thunk = srso_alias_return_thunk;
++                      set_return_thunk(srso_alias_return_thunk);
 +              } else {
 +                      setup_force_cpu_cap(X86_FEATURE_SRSO);
-                       x86_return_thunk = srso_return_thunk;
++                      set_return_thunk(srso_return_thunk);
 +              }
 +              break;
 +      case SRSO_MITIGATION_IBPB:
 +              setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
 +              /*
 +               * IBPB on entry already obviates the need for
 +               * software-based untraining so clear those in case some
 +               * other mitigation like Retbleed has selected them.
 +               */
 +              setup_clear_cpu_cap(X86_FEATURE_UNRET);
 +              setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
 +              fallthrough;
 +      case SRSO_MITIGATION_IBPB_ON_VMEXIT:
 +              setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
 +              /*
 +               * There is no need for RSB filling: entry_ibpb() ensures
 +               * all predictions, including the RSB, are invalidated,
 +               * regardless of IBPB implementation.
 +               */
 +              setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
 +              break;
 +      default:
 +              break;
 +      }
  }
  
  #undef pr_fmt
@@@ -2963,14 -2949,11 +3112,19 @@@ static ssize_t rfds_show_state(char *bu
        return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
  }
  
 +static ssize_t old_microcode_show_state(char *buf)
 +{
 +      if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 +              return sysfs_emit(buf, "Unknown: running under hypervisor");
 +
 +      return sysfs_emit(buf, "Vulnerable\n");
 +}
 +
+ static ssize_t its_show_state(char *buf)
+ {
+       return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
+ }
+
  static char *stibp_state(void)
  {
        if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
@@@ -3152,9 -3136,9 +3306,12 @@@ static ssize_t cpu_show_common(struct d
        case X86_BUG_RFDS:
                return rfds_show_state(buf);
  
 +      case X86_BUG_OLD_MICROCODE:
 +              return old_microcode_show_state(buf);
 +
+       case X86_BUG_ITS:
+               return its_show_state(buf);
+
        default:
                break;
        }
@@@ -3232,10 -3219,10 +3389,15 @@@ ssize_t cpu_show_reg_file_data_sampling
        return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
  }
  
 +ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
 +{
 +      return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
 +}
++
+ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+       return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
+ }
  #endif
  
  void __warn_thunk(void)
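A minimal standalone sketch of the select/update/apply phasing described in the comment block at the top of bugs.c above and driven by cpu_select_mitigations(); all names here (foo_*, bar_covers_foo) are hypothetical stand-ins, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

enum foo_mitigation { FOO_MITIGATION_AUTO, FOO_MITIGATION_OFF, FOO_MITIGATION_FULL };

static enum foo_mitigation foo_mitigation = FOO_MITIGATION_AUTO;
static bool bar_covers_foo;  /* would be set by some other vuln's selection */

static void foo_select_mitigation(void)
{
	/* Phase 1: resolve AUTO; an explicit cmdline override would stand. */
	if (foo_mitigation == FOO_MITIGATION_AUTO)
		foo_mitigation = FOO_MITIGATION_FULL;
}

static void foo_update_mitigation(void)
{
	/* Phase 2 (optional): revise based on what other vulns selected. */
	if (bar_covers_foo)
		foo_mitigation = FOO_MITIGATION_OFF;
}

static void foo_apply_mitigation(void)
{
	/* Phase 3: commit the choice (the kernel forces capability bits here). */
	if (foo_mitigation == FOO_MITIGATION_FULL)
		puts("foo: mitigation enabled");
}

int main(void)
{
	foo_select_mitigation();   /* every vuln selects first...       */
	foo_update_mitigation();   /* ...then cross-vuln adjustments... */
	foo_apply_mitigation();    /* ...then all choices are applied   */
	return 0;
}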
index 34efb9d2519a91bbda6f66478df7aa8c6def6a37,0ff057ff11ce93acc08ffbccec2b9b9e8b42a3b8..114aaaf6ae8a36c3b14c44d169d8661045085637
@@@ -1320,42 -1325,32 +1327,68 @@@ static bool __init vulnerable_to_rfds(u
        return cpu_matches(cpu_vuln_blacklist, RFDS);
  }
  
+ static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
+ {
+       /* The "immunity" bit trumps everything else: */
+       if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
+               return false;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return false;
+
+       /* None of the affected CPUs have BHI_CTRL */
+       if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
+               return false;
+
+       /*
+        * If a VMM did not expose ITS_NO, assume that a guest could
+        * be running on a vulnerable hardware or may migrate to such
+        * hardware.
+        */
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               return true;
+
+       if (cpu_matches(cpu_vuln_blacklist, ITS))
+               return true;
+
+       return false;
+ }
+
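The "immunity" bit checked first above is ARCH_CAP_ITS_NO in the IA32_ARCH_CAPABILITIES MSR (read via x86_read_arch_cap_msr() below); a VMM can expose it to tell a guest that no ITS mitigation is needed, and absent that the hypervisor branch deliberately errs toward treating the guest as vulnerable, since it may be running on or migrate to affected hardware.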
 +static struct x86_cpu_id cpu_latest_microcode[] = {
 +#include "microcode/intel-ucode-defs.h"
 +      {}
 +};
 +
 +static bool __init cpu_has_old_microcode(void)
 +{
 +      const struct x86_cpu_id *m = x86_match_cpu(cpu_latest_microcode);
 +
 +      /* Give unknown CPUs a pass: */
 +      if (!m) {
 +              /* Intel CPUs should be in the list. Warn if not: */
 +              if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 +                      pr_info("x86/CPU: Model not found in latest microcode list\n");
 +              return false;
 +      }
 +
 +      /*
 +       * Hosts usually lie to guests with a super high microcode
 +       * version. Just ignore what hosts tell guests:
 +       */
 +      if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 +              return false;
 +
 +      /* Consider all debug microcode to be old: */
 +      if (boot_cpu_data.microcode & BIT(31))
 +              return true;
 +
 +      /* Give new microcode a pass: */
 +      if (boot_cpu_data.microcode >= m->driver_data)
 +              return false;
 +
 +      /* Uh oh, too old: */
 +      return true;
 +}
 +
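On the BIT(31) test above: Intel debug/pre-production microcode carries a revision with the top bit set (the revision field is effectively signed, with negative values denoting debug updates), so such revisions are treated as old regardless of the table; guests are exempted because, as the comment notes, hypervisors commonly report an inflated microcode version.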
  static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
  {
        u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 1c4359366cd7602974c1b74519e3cca579af246a,50651435577c8f52fedd86e0b6c9edec84c2545d..7779ab0ca7ce62b92e61f65e368fdb7b75e0384b
@@@ -600,7 -600,7 +600,8 @@@ CPU_SHOW_VULN_FALLBACK(spec_rstack_over
  CPU_SHOW_VULN_FALLBACK(gds);
  CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
  CPU_SHOW_VULN_FALLBACK(ghostwrite);
 +CPU_SHOW_VULN_FALLBACK(old_microcode);
+ CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
  
  static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
  static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@@ -617,7 -617,7 +618,8 @@@ static DEVICE_ATTR(spec_rstack_overflow
  static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
  static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
  static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
 +static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
+ static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
  
  static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
        &dev_attr_gather_data_sampling.attr,
        &dev_attr_reg_file_data_sampling.attr,
        &dev_attr_ghostwrite.attr,
 +      &dev_attr_old_microcode.attr,
+       &dev_attr_indirect_target_selection.attr,
        NULL
  };
  
index 1f5cfc4cc04f5273f063da15b2ce710b16c5b8a5,3aa955102b349a97d9f5776799dc57e74299cff0..e6089abc28e2c825e48984a15bd748b477670760
@@@ -78,8 -78,8 +78,10 @@@ extern ssize_t cpu_show_gds(struct devi
  extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
                                               struct device_attribute *attr, char *buf);
  extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf);
 +extern ssize_t cpu_show_old_microcode(struct device *dev,
 +                                    struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+                                                 struct device_attribute *attr, char *buf);
  
  extern __printf(4, 5)
  struct device *cpu_device_create(struct device *parent, void *drvdata,
Simple merge