--- /dev/null
+From a546842de80a1da8c10cf58fa4428cf96451b1cb Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Thu, 9 Dec 2021 15:13:24 +0000
+Subject: arm64: bpf: Add BHB mitigation to the epilogue for cBPF programs
+
+From: James Morse <james.morse@arm.com>
+
+commit 0dfefc2ea2f29ced2416017d7e5b1253a54c2735 upstream.
+
+A malicious BPF program may manipulate the branch history to influence
+what the hardware speculates will happen next.
+
+On exit from a BPF program, emit the BHB mitigation sequence.
+
+This is only applied to 'classic' cBPF programs that are loaded by
+seccomp.
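+
+As a rough sketch (simplified from the code added below, not a literal
+excerpt), the tail of a classic program's epilogue becomes:
+
+    /* Clear branch history before handing control back */
+    if (was_classic)
+        build_bhb_mitigation(ctx);
+
+where build_bhb_mitigation() emits either the CLRBHB hint, an SMCCC
+ARCH_WORKAROUND_3 firmware call, or (on CPUs using the looping workaround)
+roughly "mov x0, #k; 1: b 2f; 2: subs x0, x0, #1; b.ne 1b; dsb ish; isb".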
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/spectre.h | 1
+ arch/arm64/kernel/proton-pack.c | 2 -
+ arch/arm64/net/bpf_jit_comp.c | 54 ++++++++++++++++++++++++++++++++++++---
+ 3 files changed, 52 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/spectre.h
++++ b/arch/arm64/include/asm/spectre.h
+@@ -97,6 +97,7 @@ enum mitigation_state arm64_get_meltdown
+
+ enum mitigation_state arm64_get_spectre_bhb_state(void);
+ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
++extern bool __nospectre_bhb;
+ u8 get_spectre_bhb_loop_value(void);
+ bool is_spectre_bhb_fw_mitigated(void);
+ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -1020,7 +1020,7 @@ static void this_cpu_set_vectors(enum ar
+ isb();
+ }
+
+-static bool __read_mostly __nospectre_bhb;
++bool __read_mostly __nospectre_bhb;
+ static int __init parse_spectre_bhb_param(char *str)
+ {
+ __nospectre_bhb = true;
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -7,6 +7,7 @@
+
+ #define pr_fmt(fmt) "bpf_jit: " fmt
+
++#include <linux/arm-smccc.h>
+ #include <linux/bitfield.h>
+ #include <linux/bpf.h>
+ #include <linux/filter.h>
+@@ -17,6 +18,7 @@
+ #include <asm/asm-extable.h>
+ #include <asm/byteorder.h>
+ #include <asm/cacheflush.h>
++#include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/insn.h>
+ #include <asm/patching.h>
+@@ -857,7 +859,48 @@ static void build_plt(struct jit_ctx *ct
+ plt->target = (u64)&dummy_tramp;
+ }
+
+-static void build_epilogue(struct jit_ctx *ctx)
++/* Clobbers BPF registers 1-4, aka x0-x3 */
++static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
++{
++ const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
++ u8 k = get_spectre_bhb_loop_value();
++
++ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
++ cpu_mitigations_off() || __nospectre_bhb ||
++ arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
++ return;
++
++ if (supports_clearbhb(SCOPE_SYSTEM)) {
++ emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
++ return;
++ }
++
++ if (k) {
++ emit_a64_mov_i64(r1, k, ctx);
++ emit(A64_B(1), ctx);
++ emit(A64_SUBS_I(true, r1, r1, 1), ctx);
++ emit(A64_B_(A64_COND_NE, -2), ctx);
++ emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
++ emit(aarch64_insn_get_isb_value(), ctx);
++ }
++
++ if (is_spectre_bhb_fw_mitigated()) {
++ emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
++ ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
++ switch (arm_smccc_1_1_get_conduit()) {
++ case SMCCC_CONDUIT_HVC:
++ emit(aarch64_insn_get_hvc_value(), ctx);
++ break;
++ case SMCCC_CONDUIT_SMC:
++ emit(aarch64_insn_get_smc_value(), ctx);
++ break;
++ default:
++ pr_err_once("Firmware mitigation enabled with unknown conduit\n");
++ }
++ }
++}
++
++static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
+ {
+ const u8 r0 = bpf2a64[BPF_REG_0];
+ const u8 ptr = bpf2a64[TCCNT_PTR];
+@@ -870,10 +913,13 @@ static void build_epilogue(struct jit_ct
+
+ emit(A64_POP(A64_ZR, ptr, A64_SP), ctx);
+
++ if (was_classic)
++ build_bhb_mitigation(ctx);
++
+ /* Restore FP/LR registers */
+ emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
+
+- /* Set return value */
++ /* Move the return value from bpf:r0 (aka x7) to x0 */
+ emit(A64_MOV(1, A64_R(0), r0), ctx);
+
+ /* Authenticate lr */
+@@ -1817,7 +1863,7 @@ struct bpf_prog *bpf_int_jit_compile(str
+ }
+
+ ctx.epilogue_offset = ctx.idx;
+- build_epilogue(&ctx);
++ build_epilogue(&ctx, was_classic);
+ build_plt(&ctx);
+
+ extable_align = __alignof__(struct exception_table_entry);
+@@ -1880,7 +1926,7 @@ skip_init_ctx:
+ goto out_free_hdr;
+ }
+
+- build_epilogue(&ctx);
++ build_epilogue(&ctx, was_classic);
+ build_plt(&ctx);
+
+ /* Extra pass to validate JITed code. */
--- /dev/null
+From 06d9eb1c5b3a9d5aea9f4946f5c83abcb33fdf89 Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Tue, 29 Apr 2025 16:03:38 +0100
+Subject: arm64: bpf: Only mitigate cBPF programs loaded by unprivileged users
+
+From: James Morse <james.morse@arm.com>
+
+commit f300769ead032513a68e4a02e806393402e626f8 upstream.
+
+Support for eBPF programs loaded by unprivileged users is typically
+disabled. This means only cBPF programs need to be mitigated for BHB.
+
+In addition, only mitigate cBPF programs that were loaded by an
+unprivileged user. Privileged users can also load the same program
+via eBPF, making the mitigation pointless.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/net/bpf_jit_comp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -870,6 +870,9 @@ static void __maybe_unused build_bhb_mit
+ arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
+ return;
+
++ if (capable(CAP_SYS_ADMIN))
++ return;
++
+ if (supports_clearbhb(SCOPE_SYSTEM)) {
+ emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
+ return;
--- /dev/null
+From c30190cddb6491733461d32f3db3b05b4010c8a4 Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Thu, 9 Dec 2021 15:12:19 +0000
+Subject: arm64: insn: Add support for encoding DSB
+
+From: James Morse <james.morse@arm.com>
+
+commit 63de8abd97ddb9b758bd8f915ecbd18e1f1a87a0 upstream.
+
+To generate code in the eBPF epilogue that uses the DSB instruction,
+insn.c needs a helper to encode the type and domain.
+
+Re-use the crm encoding logic from the DMB instruction.
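+
+For illustration (not part of this commit), the BPF JIT ends up emitting a
+DSB/ISB pair with the new helper like this, where emit() and ctx are the
+arm64 JIT's existing helpers:
+
+    emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
+    emit(aarch64_insn_get_isb_value(), ctx);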
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/insn.h | 1
+ arch/arm64/lib/insn.c | 60 +++++++++++++++++++++++++-----------------
+ 2 files changed, 38 insertions(+), 23 deletions(-)
+
+--- a/arch/arm64/include/asm/insn.h
++++ b/arch/arm64/include/asm/insn.h
+@@ -693,6 +693,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_in
+ }
+ #endif
+ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
++u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
+ u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+ enum aarch64_insn_system_register sysreg);
+
+--- a/arch/arm64/lib/insn.c
++++ b/arch/arm64/lib/insn.c
+@@ -5,6 +5,7 @@
+ *
+ * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
+ */
++#include <linux/bitfield.h>
+ #include <linux/bitops.h>
+ #include <linux/bug.h>
+ #include <linux/printk.h>
+@@ -1471,48 +1472,61 @@ u32 aarch64_insn_gen_extr(enum aarch64_i
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
+ }
+
+-u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
++static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
+ {
+- u32 opt;
+- u32 insn;
+-
+ switch (type) {
+ case AARCH64_INSN_MB_SY:
+- opt = 0xf;
+- break;
++ return 0xf;
+ case AARCH64_INSN_MB_ST:
+- opt = 0xe;
+- break;
++ return 0xe;
+ case AARCH64_INSN_MB_LD:
+- opt = 0xd;
+- break;
++ return 0xd;
+ case AARCH64_INSN_MB_ISH:
+- opt = 0xb;
+- break;
++ return 0xb;
+ case AARCH64_INSN_MB_ISHST:
+- opt = 0xa;
+- break;
++ return 0xa;
+ case AARCH64_INSN_MB_ISHLD:
+- opt = 0x9;
+- break;
++ return 0x9;
+ case AARCH64_INSN_MB_NSH:
+- opt = 0x7;
+- break;
++ return 0x7;
+ case AARCH64_INSN_MB_NSHST:
+- opt = 0x6;
+- break;
++ return 0x6;
+ case AARCH64_INSN_MB_NSHLD:
+- opt = 0x5;
+- break;
++ return 0x5;
+ default:
+- pr_err("%s: unknown dmb type %d\n", __func__, type);
++ pr_err("%s: unknown barrier type %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
++}
++
++u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
++{
++ u32 opt;
++ u32 insn;
++
++ opt = __get_barrier_crm_val(type);
++ if (opt == AARCH64_BREAK_FAULT)
++ return AARCH64_BREAK_FAULT;
+
+ insn = aarch64_insn_get_dmb_value();
+ insn &= ~GENMASK(11, 8);
+ insn |= (opt << 8);
+
++ return insn;
++}
++
++u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
++{
++ u32 opt, insn;
++
++ opt = __get_barrier_crm_val(type);
++ if (opt == AARCH64_BREAK_FAULT)
++ return AARCH64_BREAK_FAULT;
++
++ insn = aarch64_insn_get_dsb_base_value();
++ insn &= ~GENMASK(11, 8);
++ insn |= (opt << 8);
++
+ return insn;
+ }
+
--- /dev/null
+From 87515e9bd159b4ca1c0564435858d52f61874b2a Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Mon, 12 Aug 2024 17:50:22 +0100
+Subject: arm64: proton-pack: Add new CPUs 'k' values for branch mitigation
+
+From: James Morse <james.morse@arm.com>
+
+commit efe676a1a7554219eae0b0dcfe1e0cdcc9ef9aef upstream.
+
+Update the list of 'k' values for the branch mitigation from arm's
+website.
+
+Add the values for Cortex-X1C. The MIDR_EL1 value can be found here:
+https://developer.arm.com/documentation/101968/0002/Register-descriptions/AArch>
+
+Link: https://developer.arm.com/documentation/110280/2-0/?lang=en
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cputype.h | 2 ++
+ arch/arm64/kernel/proton-pack.c | 1 +
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -81,6 +81,7 @@
+ #define ARM_CPU_PART_CORTEX_A78AE 0xD42
+ #define ARM_CPU_PART_CORTEX_X1 0xD44
+ #define ARM_CPU_PART_CORTEX_A510 0xD46
++#define ARM_CPU_PART_CORTEX_X1C 0xD4C
+ #define ARM_CPU_PART_CORTEX_A520 0xD80
+ #define ARM_CPU_PART_CORTEX_A710 0xD47
+ #define ARM_CPU_PART_CORTEX_A715 0xD4D
+@@ -166,6 +167,7 @@
+ #define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
+ #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
+ #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
++#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
+ #define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
+ #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+ #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -891,6 +891,7 @@ static u8 spectre_bhb_loop_affected(void
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
--- /dev/null
+From 53595ef6306c8b6e45a1160bc0e0522eb1efc269 Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Tue, 29 Apr 2025 13:55:17 +0100
+Subject: arm64: proton-pack: Expose whether the branchy loop k value
+
+From: James Morse <james.morse@arm.com>
+
+commit a1152be30a043d2d4dcb1683415f328bf3c51978 upstream.
+
+Add a helper to expose the k value of the branchy loop. This is needed
+by the BPF JIT to generate the mitigation sequence in BPF programs.
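+
+A JIT caller would consume the helper roughly as follows (illustrative
+sketch only):
+
+    u8 k = get_spectre_bhb_loop_value();
+
+    if (k) {
+        /* emit the k-iteration clearing loop followed by DSB; ISB */
+    }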
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/spectre.h | 1 +
+ arch/arm64/kernel/proton-pack.c | 5 +++++
+ 2 files changed, 6 insertions(+)
+
+--- a/arch/arm64/include/asm/spectre.h
++++ b/arch/arm64/include/asm/spectre.h
+@@ -97,6 +97,7 @@ enum mitigation_state arm64_get_meltdown
+
+ enum mitigation_state arm64_get_spectre_bhb_state(void);
+ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
++u8 get_spectre_bhb_loop_value(void);
+ bool is_spectre_bhb_fw_mitigated(void);
+ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+ bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -998,6 +998,11 @@ bool is_spectre_bhb_affected(const struc
+ return true;
+ }
+
++u8 get_spectre_bhb_loop_value(void)
++{
++ return max_bhb_k;
++}
++
+ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+ {
+ const char *v = arm64_get_bp_hardening_vector(slot);
--- /dev/null
+From 11e265d8e9509ace345ee6cdf3af8219beddfa58 Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Mon, 19 Aug 2024 14:15:53 +0100
+Subject: arm64: proton-pack: Expose whether the platform is mitigated by firmware
+
+From: James Morse <james.morse@arm.com>
+
+commit e7956c92f396a44eeeb6eaf7a5b5e1ad24db6748 upstream.
+
+is_spectre_bhb_fw_affected() allows the caller to determine if the CPU
+is known to need a firmware mitigation. CPUs are either on the list
+of CPUs we know about, or firmware has been queried and reported that
+the platform is affected - and mitigated by firmware.
+
+This helper is not useful to determine if the platform is mitigated
+by firmware. A CPU could be on the known list, but the firmware may
+not be implemented. It's affected but not mitigated.
+
+spectre_bhb_enable_mitigation() handles this distinction by checking
+the firmware state before enabling the mitigation.
+
+Add a helper to expose this state. This will be used by the BPF JIT
+to determine if calling firmware for a mitigation is necessary and
+supported.
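+
+Illustrative use in the JIT (a sketch, not part of this commit):
+
+    if (is_spectre_bhb_fw_mitigated()) {
+        /* emit the SMCCC ARCH_WORKAROUND_3 call via the HVC or SMC conduit */
+    }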
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/spectre.h | 1 +
+ arch/arm64/kernel/proton-pack.c | 5 +++++
+ 2 files changed, 6 insertions(+)
+
+--- a/arch/arm64/include/asm/spectre.h
++++ b/arch/arm64/include/asm/spectre.h
+@@ -97,6 +97,7 @@ enum mitigation_state arm64_get_meltdown
+
+ enum mitigation_state arm64_get_spectre_bhb_state(void);
+ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
++bool is_spectre_bhb_fw_mitigated(void);
+ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+ bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
+
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -1093,6 +1093,11 @@ void spectre_bhb_enable_mitigation(const
+ update_mitigation_state(&spectre_bhb_state, state);
+ }
+
++bool is_spectre_bhb_fw_mitigated(void)
++{
++ return test_bit(BHB_FW, &system_bhb_mitigations);
++}
++
+ /* Patched to NOP when enabled */
+ void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
+ __le32 *origptr,
--- /dev/null
+From 952e3a8819c040a937c2b840862c41928de417f9 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 11 Apr 2025 15:36:38 -0700
+Subject: Documentation: x86/bugs/its: Add ITS documentation
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 1ac116ce6468670eeda39345a5585df308243dca upstream.
+
+Add the admin-guide for Indirect Target Selection (ITS).
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/index.rst | 1
+ Documentation/admin-guide/hw-vuln/indirect-target-selection.rst | 168 ++++++++++
+ 2 files changed, 169 insertions(+)
+ create mode 100644 Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
+
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -22,3 +22,4 @@ are configurable at compile, boot or run
+ srso
+ gather_data_sampling
+ reg-file-data-sampling
++ indirect-target-selection
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
+@@ -0,0 +1,168 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++Indirect Target Selection (ITS)
++===============================
++
++ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were
++released before Alder Lake. ITS may allow an attacker to control the prediction
++of indirect branches and RETs located in the lower half of a cacheline.
++
++ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium).
++
++Scope of Impact
++---------------
++- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be
++ predicted with unintended target corresponding to a branch in the guest.
++
++- **Intra-Mode BTI**: In-kernel training such as through cBPF or other native
++ gadgets.
++
++- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect
++ branches may still be predicted with targets corresponding to direct branches
++ executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which
++ should be available via distro updates. Alternatively microcode can be
++ obtained from Intel's github repository [#f1]_.
++
++Affected CPUs
++-------------
++Below is the list of ITS affected CPUs [#f2]_ [#f3]_:
++
++ ======================== ============ ==================== ===============
++ Common name Family_Model eIBRS Intra-mode BTI
++ Guest/Host Isolation
++ ======================== ============ ==================== ===============
++ SKYLAKE_X (step >= 6) 06_55H Affected Affected
++ ICELAKE_X 06_6AH Not affected Affected
++ ICELAKE_D 06_6CH Not affected Affected
++ ICELAKE_L 06_7EH Not affected Affected
++ TIGERLAKE_L 06_8CH Not affected Affected
++ TIGERLAKE 06_8DH Not affected Affected
++ KABYLAKE_L (step >= 12) 06_8EH Affected Affected
++ KABYLAKE (step >= 13) 06_9EH Affected Affected
++ COMETLAKE 06_A5H Affected Affected
++ COMETLAKE_L 06_A6H Affected Affected
++ ROCKETLAKE 06_A7H Not affected Affected
++ ======================== ============ ==================== ===============
++
++- All affected CPUs enumerate Enhanced IBRS feature.
++- IBPB isolation is affected on all ITS affected CPUs, and needs a microcode
++ update for mitigation.
++- None of the affected CPUs enumerate BHI_CTRL which was introduced in Golden
++ Cove (Alder Lake and Sapphire Rapids). This can help guests to determine the
++ host's affected status.
++- Intel Atom CPUs are not affected by ITS.
++
++Mitigation
++----------
++As only the indirect branches and RETs that have their last byte of instruction
++in the lower half of the cacheline are vulnerable to ITS, the basic idea behind
++the mitigation is to not allow indirect branches in the lower half.
++
++This is achieved by relying on existing retpoline support in the kernel, and in
++compilers. ITS-vulnerable retpoline sites are runtime patched to point to newly
++added ITS-safe thunks. These safe thunks consists of indirect branch in the
++second half of the cacheline. Not all retpoline sites are patched to thunks, if
++a retpoline site is evaluated to be ITS-safe, it is replaced with an inline
++indirect branch.
++
++Dynamic thunks
++~~~~~~~~~~~~~~
++From a dynamically allocated pool of safe-thunks, each vulnerable site is
++replaced with a new thunk, such that they get a unique address. This could
++improve the branch prediction accuracy. Also, it is a defense-in-depth measure
++against aliasing.
++
++Note, for simplicity, indirect branches in eBPF programs are always replaced
++with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
++in future this can be changed to use dynamic thunks.
++
++All vulnerable RETs are replaced with a static thunk; they do not use dynamic
++thunks. This is because RETs mostly get their prediction from the RSB, which
++does not depend on the source address. RETs that underflow the RSB may benefit
++from dynamic thunks, but RETs significantly outnumber indirect branches, and
++any benefit from a unique source address could be outweighed by the increased
++icache footprint and iTLB pressure.
++
++Retpoline
++~~~~~~~~~
++The retpoline sequence also mitigates ITS-unsafe indirect branches. For this
++reason, when retpoline is enabled, the ITS mitigation only relocates the RETs
++to safe thunks, unless the user requested the RSB-stuffing mitigation.
++
++RSB Stuffing
++~~~~~~~~~~~~
++RSB-stuffing via Call Depth Tracking is a mitigation for Retbleed RSB-underflow
++attacks; it also mitigates RETs that are vulnerable to ITS.
++
++Mitigation in guests
++^^^^^^^^^^^^^^^^^^^^
++All guests deploy ITS mitigation by default, irrespective of eIBRS enumeration
++and Family/Model of the guest. This is because eIBRS feature could be hidden
++from a guest. One exception to this is when a guest enumerates BHI_DIS_S, which
++indicates that the guest is running on an unaffected host.
++
++To prevent guests from unnecessarily deploying the mitigation on unaffected
++platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When
++a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit
++is not set by any hardware, but is **intended for VMMs to synthesize** it for
++guests as per the host's affected status.
++
++Mitigation options
++^^^^^^^^^^^^^^^^^^
++The ITS mitigation can be controlled using the "indirect_target_selection"
++kernel parameter. The available options are:
++
++ ======== ===================================================================
++ on (default) Deploy the "Aligned branch/return thunks" mitigation.
++ If spectre_v2 mitigation enables retpoline, aligned-thunks are only
++ deployed for the affected RET instructions. Retpoline mitigates
++ indirect branches.
++
++ off Disable ITS mitigation.
++
++ vmexit Equivalent to "=on" if the CPU is affected by guest/host isolation
++ part of ITS. Otherwise, mitigation is not deployed. This option is
++ useful when host userspace is not in the threat model, and only
++ attacks from guest to host are considered.
++
++ stuff Deploy RSB-fill mitigation when retpoline is also deployed.
++ Otherwise, deploy the default mitigation. When retpoline mitigation
++ is enabled, RSB-stuffing via Call-Depth-Tracking also mitigates
++ ITS.
++
++ force Force the ITS bug and deploy the default mitigation.
++ ======== ===================================================================
++
++Sysfs reporting
++---------------
++
++The sysfs file showing ITS mitigation status is:
++
++ /sys/devices/system/cpu/vulnerabilities/indirect_target_selection
++
++Note, microcode mitigation status is not reported in this file.
++
++The possible values in this file are:
++
++.. list-table::
++
++ * - Not affected
++ - The processor is not vulnerable.
++ * - Vulnerable
++ - System is vulnerable and no mitigation has been applied.
++ * - Vulnerable, KVM: Not affected
++ - System is vulnerable to intra-mode BTI, but not affected by eIBRS
++ guest/host isolation.
++ * - Mitigation: Aligned branch/return thunks
++ - The mitigation is enabled, affected indirect branches and RETs are
++ relocated to safe thunks.
++ * - Mitigation: Retpolines, Stuffing RSB
++ - The mitigation is enabled using retpoline and RSB stuffing.
++
++References
++----------
++.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files
++
++.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
++
++.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list
--- /dev/null
+From 5baf25880861e6e0ae26a9b3857c76ff0c5de6a5 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Tue, 24 Dec 2024 16:09:28 -0800
+Subject: selftest/x86/bugs: Add selftests for ITS
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 7a9b709e7cc5ce1ffb84ce07bf6d157e1de758df upstream.
+
+Below are the tests added for Indirect Target Selection (ITS):
+
+- its_sysfs.py - Check if sysfs reflects the correct mitigation status for
+ the mitigation selected via the kernel cmdline.
+
+- its_permutations.py - tests mitigation selection with cmdline
+ permutations with other bugs like spectre_v2 and retbleed.
+
+- its_indirect_alignment.py - verifies that addresses in the
+  .retpoline_sites section that belong to the lower half of a cacheline
+  are patched to an ITS-safe thunk. Typical output looks like below:
+
+ Site 49: function symbol: __x64_sys_restart_syscall+0x1f <0xffffffffbb1509af>
+ # vmlinux: 0xffffffff813509af: jmp 0xffffffff81f5a8e0
+ # kcore: 0xffffffffbb1509af: jmpq *%rax
+ # ITS thunk NOT expected for site 49
+ # PASSED: Found *%rax
+ #
+ Site 50: function symbol: __resched_curr+0xb0 <0xffffffffbb181910>
+ # vmlinux: 0xffffffff81381910: jmp 0xffffffff81f5a8e0
+ # kcore: 0xffffffffbb181910: jmp 0xffffffffc02000fc
+ # ITS thunk expected for site 50
+ # PASSED: Found 0xffffffffc02000fc -> jmpq *%rax <scattered-thunk?>
+
+- its_ret_alignment.py - verifies that addresses in the .return_sites
+  section that belong to the lower half of a cacheline are patched to
+  its_return_thunk. Typical output looks like below:
+
+ Site 97: function symbol: collect_event+0x48 <0xffffffffbb007f18>
+ # vmlinux: 0xffffffff81207f18: jmp 0xffffffff81f5b500
+ # kcore: 0xffffffffbb007f18: jmp 0xffffffffbbd5b560
+ # PASSED: Found jmp 0xffffffffbbd5b560 <its_return_thunk>
+ #
+ Site 98: function symbol: collect_event+0xa4 <0xffffffffbb007f74>
+ # vmlinux: 0xffffffff81207f74: jmp 0xffffffff81f5b500
+ # kcore: 0xffffffffbb007f74: retq
+ # PASSED: Found retq
+
+Some of these tests depend on tools like virtme-ng[1] and drgn[2].
+When the dependencies are not met, the test will be skipped.
+
+[1] https://github.com/arighi/virtme-ng
+[2] https://github.com/osandov/drgn
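+
+The suite plugs into the kselftest framework, so with a typical kselftest
+setup it can be run with something like
+"make -C tools/testing/selftests TARGETS=x86/bugs run_tests".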
+
+Co-developed-by: Tao Zhang <tao1.zhang@linux.intel.com>
+Signed-off-by: Tao Zhang <tao1.zhang@linux.intel.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/Makefile | 1
+ tools/testing/selftests/x86/bugs/Makefile | 3
+ tools/testing/selftests/x86/bugs/common.py | 164 +++++++++++++
+ tools/testing/selftests/x86/bugs/its_indirect_alignment.py | 150 +++++++++++
+ tools/testing/selftests/x86/bugs/its_permutations.py | 109 ++++++++
+ tools/testing/selftests/x86/bugs/its_ret_alignment.py | 139 +++++++++++
+ tools/testing/selftests/x86/bugs/its_sysfs.py | 65 +++++
+ 7 files changed, 631 insertions(+)
+ create mode 100644 tools/testing/selftests/x86/bugs/Makefile
+ create mode 100755 tools/testing/selftests/x86/bugs/common.py
+ create mode 100755 tools/testing/selftests/x86/bugs/its_indirect_alignment.py
+ create mode 100755 tools/testing/selftests/x86/bugs/its_permutations.py
+ create mode 100755 tools/testing/selftests/x86/bugs/its_ret_alignment.py
+ create mode 100755 tools/testing/selftests/x86/bugs/its_sysfs.py
+
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -115,6 +115,7 @@ TARGETS += user_events
+ TARGETS += vDSO
+ TARGETS += mm
+ TARGETS += x86
++TARGETS += x86/bugs
+ TARGETS += zram
+ #Please keep the TARGETS list alphabetically sorted
+ # Run "make quicktest=1 run_tests" or
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/Makefile
+@@ -0,0 +1,3 @@
++TEST_PROGS := its_sysfs.py its_permutations.py its_indirect_alignment.py its_ret_alignment.py
++TEST_FILES := common.py
++include ../../lib.mk
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/common.py
+@@ -0,0 +1,164 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# This contains kselftest framework adapted common functions for testing
++# mitigation for x86 bugs.
++
++import os, sys, re, shutil
++
++sys.path.insert(0, '../../kselftest')
++import ksft
++
++def read_file(path):
++ if not os.path.exists(path):
++ return None
++ with open(path, 'r') as file:
++ return file.read().strip()
++
++def cpuinfo_has(arg):
++ cpuinfo = read_file('/proc/cpuinfo')
++ if arg in cpuinfo:
++ return True
++ return False
++
++def cmdline_has(arg):
++ cmdline = read_file('/proc/cmdline')
++ if arg in cmdline:
++ return True
++ return False
++
++def cmdline_has_either(args):
++ cmdline = read_file('/proc/cmdline')
++ for arg in args:
++ if arg in cmdline:
++ return True
++ return False
++
++def cmdline_has_none(args):
++ return not cmdline_has_either(args)
++
++def cmdline_has_all(args):
++ cmdline = read_file('/proc/cmdline')
++ for arg in args:
++ if arg not in cmdline:
++ return False
++ return True
++
++def get_sysfs(bug):
++ return read_file("/sys/devices/system/cpu/vulnerabilities/" + bug)
++
++def sysfs_has(bug, mitigation):
++ status = get_sysfs(bug)
++ if mitigation in status:
++ return True
++ return False
++
++def sysfs_has_either(bugs, mitigations):
++ for bug in bugs:
++ for mitigation in mitigations:
++ if sysfs_has(bug, mitigation):
++ return True
++ return False
++
++def sysfs_has_none(bugs, mitigations):
++ return not sysfs_has_either(bugs, mitigations)
++
++def sysfs_has_all(bugs, mitigations):
++ for bug in bugs:
++ for mitigation in mitigations:
++ if not sysfs_has(bug, mitigation):
++ return False
++ return True
++
++def bug_check_pass(bug, found):
++ ksft.print_msg(f"\nFound: {found}")
++ # ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
++ ksft.test_result_pass(f'{bug}: {found}')
++
++def bug_check_fail(bug, found, expected):
++ ksft.print_msg(f'\nFound:\t {found}')
++ ksft.print_msg(f'Expected:\t {expected}')
++ ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
++ ksft.test_result_fail(f'{bug}: {found}')
++
++def bug_status_unknown(bug, found):
++ ksft.print_msg(f'\nUnknown status: {found}')
++ ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
++ ksft.test_result_fail(f'{bug}: {found}')
++
++def basic_checks_sufficient(bug, mitigation):
++ if not mitigation:
++ bug_status_unknown(bug, "None")
++ return True
++ elif mitigation == "Not affected":
++ ksft.test_result_pass(bug)
++ return True
++ elif mitigation == "Vulnerable":
++ if cmdline_has_either([f'{bug}=off', 'mitigations=off']):
++ bug_check_pass(bug, mitigation)
++ return True
++ return False
++
++def get_section_info(vmlinux, section_name):
++ from elftools.elf.elffile import ELFFile
++ with open(vmlinux, 'rb') as f:
++ elffile = ELFFile(f)
++ section = elffile.get_section_by_name(section_name)
++ if section is None:
++ ksft.print_msg("Available sections in vmlinux:")
++ for sec in elffile.iter_sections():
++ ksft.print_msg(sec.name)
++ raise ValueError(f"Section {section_name} not found in {vmlinux}")
++ return section['sh_addr'], section['sh_offset'], section['sh_size']
++
++def get_patch_sites(vmlinux, offset, size):
++ import struct
++ output = []
++ with open(vmlinux, 'rb') as f:
++ f.seek(offset)
++ i = 0
++ while i < size:
++ data = f.read(4) # s32
++ if not data:
++ break
++ sym_offset = struct.unpack('<i', data)[0] + i
++ i += 4
++ output.append(sym_offset)
++ return output
++
++def get_instruction_from_vmlinux(elffile, section, virtual_address, target_address):
++ from capstone import Cs, CS_ARCH_X86, CS_MODE_64
++ section_start = section['sh_addr']
++ section_end = section_start + section['sh_size']
++
++ if not (section_start <= target_address < section_end):
++ return None
++
++ offset = target_address - section_start
++ code = section.data()[offset:offset + 16]
++
++ cap = init_capstone()
++ for instruction in cap.disasm(code, target_address):
++ if instruction.address == target_address:
++ return instruction
++ return None
++
++def init_capstone():
++ from capstone import Cs, CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_ATT
++ cap = Cs(CS_ARCH_X86, CS_MODE_64)
++ cap.syntax = CS_OPT_SYNTAX_ATT
++ return cap
++
++def get_runtime_kernel():
++ import drgn
++ return drgn.program_from_kernel()
++
++def check_dependencies_or_skip(modules, script_name="unknown test"):
++ for mod in modules:
++ try:
++ __import__(mod)
++ except ImportError:
++ ksft.test_result_skip(f"Skipping {script_name}: missing module '{mod}'")
++ ksft.finished()
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/its_indirect_alignment.py
+@@ -0,0 +1,150 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# Test for indirect target selection (ITS) mitigation.
++#
++# Test if indirect CALL/JMP are correctly patched by evaluating
++# the vmlinux .retpoline_sites in /proc/kcore.
++
++# Install dependencies
++# add-apt-repository ppa:michel-slm/kernel-utils
++# apt update
++# apt install -y python3-drgn python3-pyelftools python3-capstone
++#
++# Best to copy the vmlinux at a standard location:
++# mkdir -p /usr/lib/debug/lib/modules/$(uname -r)
++# cp $VMLINUX /usr/lib/debug/lib/modules/$(uname -r)/vmlinux
++#
++# Usage: ./its_indirect_alignment.py [vmlinux]
++
++import os, sys, argparse
++from pathlib import Path
++
++this_dir = os.path.dirname(os.path.realpath(__file__))
++sys.path.insert(0, this_dir + '/../../kselftest')
++import ksft
++import common as c
++
++bug = "indirect_target_selection"
++
++mitigation = c.get_sysfs(bug)
++if not mitigation or "Aligned branch/return thunks" not in mitigation:
++ ksft.test_result_skip("Skipping its_indirect_alignment.py: Aligned branch/return thunks not enabled")
++ ksft.finished()
++
++if c.sysfs_has("spectre_v2", "Retpolines"):
++ ksft.test_result_skip("Skipping its_indirect_alignment.py: Retpolines deployed")
++ ksft.finished()
++
++c.check_dependencies_or_skip(['drgn', 'elftools', 'capstone'], script_name="its_indirect_alignment.py")
++
++from elftools.elf.elffile import ELFFile
++from drgn.helpers.common.memory import identify_address
++
++cap = c.init_capstone()
++
++if len(os.sys.argv) > 1:
++ arg_vmlinux = os.sys.argv[1]
++ if not os.path.exists(arg_vmlinux):
++ ksft.test_result_fail(f"its_indirect_alignment.py: vmlinux not found at argument path: {arg_vmlinux}")
++ ksft.exit_fail()
++ os.makedirs(f"/usr/lib/debug/lib/modules/{os.uname().release}", exist_ok=True)
++ os.system(f'cp {arg_vmlinux} /usr/lib/debug/lib/modules/$(uname -r)/vmlinux')
++
++vmlinux = f"/usr/lib/debug/lib/modules/{os.uname().release}/vmlinux"
++if not os.path.exists(vmlinux):
++ ksft.test_result_fail(f"its_indirect_alignment.py: vmlinux not found at {vmlinux}")
++ ksft.exit_fail()
++
++ksft.print_msg(f"Using vmlinux: {vmlinux}")
++
++retpolines_start_vmlinux, retpolines_sec_offset, size = c.get_section_info(vmlinux, '.retpoline_sites')
++ksft.print_msg(f"vmlinux: Section .retpoline_sites (0x{retpolines_start_vmlinux:x}) found at 0x{retpolines_sec_offset:x} with size 0x{size:x}")
++
++sites_offset = c.get_patch_sites(vmlinux, retpolines_sec_offset, size)
++total_retpoline_tests = len(sites_offset)
++ksft.print_msg(f"Found {total_retpoline_tests} retpoline sites")
++
++prog = c.get_runtime_kernel()
++retpolines_start_kcore = prog.symbol('__retpoline_sites').address
++ksft.print_msg(f'kcore: __retpoline_sites: 0x{retpolines_start_kcore:x}')
++
++x86_indirect_its_thunk_r15 = prog.symbol('__x86_indirect_its_thunk_r15').address
++ksft.print_msg(f'kcore: __x86_indirect_its_thunk_r15: 0x{x86_indirect_its_thunk_r15:x}')
++
++tests_passed = 0
++tests_failed = 0
++tests_unknown = 0
++
++with open(vmlinux, 'rb') as f:
++ elffile = ELFFile(f)
++ text_section = elffile.get_section_by_name('.text')
++
++ for i in range(0, len(sites_offset)):
++ site = retpolines_start_kcore + sites_offset[i]
++ vmlinux_site = retpolines_start_vmlinux + sites_offset[i]
++ passed = unknown = failed = False
++ try:
++ vmlinux_insn = c.get_instruction_from_vmlinux(elffile, text_section, text_section['sh_addr'], vmlinux_site)
++ kcore_insn = list(cap.disasm(prog.read(site, 16), site))[0]
++ operand = kcore_insn.op_str
++ insn_end = site + kcore_insn.size - 1 # TODO handle Jcc.32 __x86_indirect_thunk_\reg
++ safe_site = insn_end & 0x20
++ site_status = "" if safe_site else "(unsafe)"
++
++ ksft.print_msg(f"\nSite {i}: {identify_address(prog, site)} <0x{site:x}> {site_status}")
++ ksft.print_msg(f"\tvmlinux: 0x{vmlinux_insn.address:x}:\t{vmlinux_insn.mnemonic}\t{vmlinux_insn.op_str}")
++ ksft.print_msg(f"\tkcore: 0x{kcore_insn.address:x}:\t{kcore_insn.mnemonic}\t{kcore_insn.op_str}")
++
++ if (site & 0x20) ^ (insn_end & 0x20):
++ ksft.print_msg(f"\tSite at safe/unsafe boundary: {str(kcore_insn.bytes)} {kcore_insn.mnemonic} {operand}")
++ if safe_site:
++ tests_passed += 1
++ passed = True
++ ksft.print_msg(f"\tPASSED: At safe address")
++ continue
++
++ if operand.startswith('0xffffffff'):
++ thunk = int(operand, 16)
++ if thunk > x86_indirect_its_thunk_r15:
++ insn_at_thunk = list(cap.disasm(prog.read(thunk, 16), thunk))[0]
++ operand += ' -> ' + insn_at_thunk.mnemonic + ' ' + insn_at_thunk.op_str + ' <dynamic-thunk?>'
++ if 'jmp' in insn_at_thunk.mnemonic and thunk & 0x20:
++ ksft.print_msg(f"\tPASSED: Found {operand} at safe address")
++ passed = True
++ if not passed:
++ if kcore_insn.operands[0].type == capstone.CS_OP_IMM:
++ operand += ' <' + prog.symbol(int(operand, 16)) + '>'
++ if '__x86_indirect_its_thunk_' in operand:
++ ksft.print_msg(f"\tPASSED: Found {operand}")
++ else:
++ ksft.print_msg(f"\tPASSED: Found direct branch: {kcore_insn}, ITS thunk not required.")
++ passed = True
++ else:
++ unknown = True
++ if passed:
++ tests_passed += 1
++ elif unknown:
++ ksft.print_msg(f"UNKNOWN: unexpected operand: {kcore_insn}")
++ tests_unknown += 1
++ else:
++ ksft.print_msg(f'\t************* FAILED *************')
++ ksft.print_msg(f"\tFound {kcore_insn.bytes} {kcore_insn.mnemonic} {operand}")
++ ksft.print_msg(f'\t**********************************')
++ tests_failed += 1
++ except Exception as e:
++ ksft.print_msg(f"UNKNOWN: An unexpected error occurred: {e}")
++ tests_unknown += 1
++
++ksft.print_msg(f"\n\nSummary:")
++ksft.print_msg(f"PASS: \t{tests_passed} \t/ {total_retpoline_tests}")
++ksft.print_msg(f"FAIL: \t{tests_failed} \t/ {total_retpoline_tests}")
++ksft.print_msg(f"UNKNOWN: \t{tests_unknown} \t/ {total_retpoline_tests}")
++
++if tests_failed == 0:
++ ksft.test_result_pass("All ITS return thunk sites passed")
++else:
++ ksft.test_result_fail(f"{tests_failed} ITS return thunk sites failed")
++ksft.finished()
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/its_permutations.py
+@@ -0,0 +1,109 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# Test for indirect target selection (ITS) cmdline permutations with other bugs
++# like spectre_v2 and retbleed.
++
++import os, sys, subprocess, itertools, re, shutil
++
++test_dir = os.path.dirname(os.path.realpath(__file__))
++sys.path.insert(0, test_dir + '/../../kselftest')
++import ksft
++import common as c
++
++bug = "indirect_target_selection"
++mitigation = c.get_sysfs(bug)
++
++if not mitigation or "Not affected" in mitigation:
++ ksft.test_result_skip("Skipping its_permutations.py: not applicable")
++ ksft.finished()
++
++if shutil.which('vng') is None:
++ ksft.test_result_skip("Skipping its_permutations.py: virtme-ng ('vng') not found in PATH.")
++ ksft.finished()
++
++TEST = f"{test_dir}/its_sysfs.py"
++default_kparam = ['clearcpuid=hypervisor', 'panic=5', 'panic_on_warn=1', 'oops=panic', 'nmi_watchdog=1', 'hung_task_panic=1']
++
++DEBUG = " -v "
++
++# Install dependencies
++# https://github.com/arighi/virtme-ng
++# apt install virtme-ng
++BOOT_CMD = f"vng --run {test_dir}/../../../../../arch/x86/boot/bzImage "
++#BOOT_CMD += DEBUG
++
++bug = "indirect_target_selection"
++
++input_options = {
++ 'indirect_target_selection' : ['off', 'on', 'stuff', 'vmexit'],
++ 'retbleed' : ['off', 'stuff', 'auto'],
++ 'spectre_v2' : ['off', 'on', 'eibrs', 'retpoline', 'ibrs', 'eibrs,retpoline'],
++}
++
++def pretty_print(output):
++ OKBLUE = '\033[94m'
++ OKGREEN = '\033[92m'
++ WARNING = '\033[93m'
++ FAIL = '\033[91m'
++ ENDC = '\033[0m'
++ BOLD = '\033[1m'
++
++ # Define patterns and their corresponding colors
++ patterns = {
++ r"^ok \d+": OKGREEN,
++ r"^not ok \d+": FAIL,
++ r"^# Testing .*": OKBLUE,
++ r"^# Found: .*": WARNING,
++ r"^# Totals: .*": BOLD,
++ r"pass:([1-9]\d*)": OKGREEN,
++ r"fail:([1-9]\d*)": FAIL,
++ r"skip:([1-9]\d*)": WARNING,
++ }
++
++ # Apply colors based on patterns
++ for pattern, color in patterns.items():
++ output = re.sub(pattern, lambda match: f"{color}{match.group(0)}{ENDC}", output, flags=re.MULTILINE)
++
++ print(output)
++
++combinations = list(itertools.product(*input_options.values()))
++ksft.print_header()
++ksft.set_plan(len(combinations))
++
++logs = ""
++
++for combination in combinations:
++ append = ""
++ log = ""
++ for p in default_kparam:
++ append += f' --append={p}'
++ command = BOOT_CMD + append
++ test_params = ""
++ for i, key in enumerate(input_options.keys()):
++ param = f'{key}={combination[i]}'
++ test_params += f' {param}'
++ command += f" --append={param}"
++ command += f" -- {TEST}"
++ test_name = f"{bug} {test_params}"
++ pretty_print(f'# Testing {test_name}')
++ t = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
++ t.wait()
++ output, _ = t.communicate()
++ if t.returncode == 0:
++ ksft.test_result_pass(test_name)
++ else:
++ ksft.test_result_fail(test_name)
++ output = output.decode()
++ log += f" {output}"
++ pretty_print(log)
++ logs += output + "\n"
++
++# Optionally use tappy to parse the output
++# apt install python3-tappy
++with open("logs.txt", "w") as f:
++ f.write(logs)
++
++ksft.finished()
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/its_ret_alignment.py
+@@ -0,0 +1,139 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# Test for indirect target selection (ITS) mitigation.
++#
++# Tests if the RETs are correctly patched by evaluating the
++# vmlinux .return_sites in /proc/kcore.
++#
++# Install dependencies
++# add-apt-repository ppa:michel-slm/kernel-utils
++# apt update
++# apt install -y python3-drgn python3-pyelftools python3-capstone
++#
++# Run on target machine
++# mkdir -p /usr/lib/debug/lib/modules/$(uname -r)
++# cp $VMLINUX /usr/lib/debug/lib/modules/$(uname -r)/vmlinux
++#
++# Usage: ./its_ret_alignment.py
++
++import os, sys, argparse
++from pathlib import Path
++
++this_dir = os.path.dirname(os.path.realpath(__file__))
++sys.path.insert(0, this_dir + '/../../kselftest')
++import ksft
++import common as c
++
++bug = "indirect_target_selection"
++mitigation = c.get_sysfs(bug)
++if not mitigation or "Aligned branch/return thunks" not in mitigation:
++ ksft.test_result_skip("Skipping its_ret_alignment.py: Aligned branch/return thunks not enabled")
++ ksft.finished()
++
++c.check_dependencies_or_skip(['drgn', 'elftools', 'capstone'], script_name="its_ret_alignment.py")
++
++from elftools.elf.elffile import ELFFile
++from drgn.helpers.common.memory import identify_address
++
++cap = c.init_capstone()
++
++if len(os.sys.argv) > 1:
++ arg_vmlinux = os.sys.argv[1]
++ if not os.path.exists(arg_vmlinux):
++ ksft.test_result_fail(f"its_ret_alignment.py: vmlinux not found at user-supplied path: {arg_vmlinux}")
++ ksft.exit_fail()
++ os.makedirs(f"/usr/lib/debug/lib/modules/{os.uname().release}", exist_ok=True)
++ os.system(f'cp {arg_vmlinux} /usr/lib/debug/lib/modules/$(uname -r)/vmlinux')
++
++vmlinux = f"/usr/lib/debug/lib/modules/{os.uname().release}/vmlinux"
++if not os.path.exists(vmlinux):
++ ksft.test_result_fail(f"its_ret_alignment.py: vmlinux not found at {vmlinux}")
++ ksft.exit_fail()
++
++ksft.print_msg(f"Using vmlinux: {vmlinux}")
++
++rethunks_start_vmlinux, rethunks_sec_offset, size = c.get_section_info(vmlinux, '.return_sites')
++ksft.print_msg(f"vmlinux: Section .return_sites (0x{rethunks_start_vmlinux:x}) found at 0x{rethunks_sec_offset:x} with size 0x{size:x}")
++
++sites_offset = c.get_patch_sites(vmlinux, rethunks_sec_offset, size)
++total_rethunk_tests = len(sites_offset)
++ksft.print_msg(f"Found {total_rethunk_tests} rethunk sites")
++
++prog = c.get_runtime_kernel()
++rethunks_start_kcore = prog.symbol('__return_sites').address
++ksft.print_msg(f'kcore: __rethunk_sites: 0x{rethunks_start_kcore:x}')
++
++its_return_thunk = prog.symbol('its_return_thunk').address
++ksft.print_msg(f'kcore: its_return_thunk: 0x{its_return_thunk:x}')
++
++tests_passed = 0
++tests_failed = 0
++tests_unknown = 0
++tests_skipped = 0
++
++with open(vmlinux, 'rb') as f:
++ elffile = ELFFile(f)
++ text_section = elffile.get_section_by_name('.text')
++
++ for i in range(len(sites_offset)):
++ site = rethunks_start_kcore + sites_offset[i]
++ vmlinux_site = rethunks_start_vmlinux + sites_offset[i]
++ try:
++ passed = unknown = failed = skipped = False
++
++ symbol = identify_address(prog, site)
++ vmlinux_insn = c.get_instruction_from_vmlinux(elffile, text_section, text_section['sh_addr'], vmlinux_site)
++ kcore_insn = list(cap.disasm(prog.read(site, 16), site))[0]
++
++ insn_end = site + kcore_insn.size - 1
++
++ safe_site = insn_end & 0x20
++ site_status = "" if safe_site else "(unsafe)"
++
++ ksft.print_msg(f"\nSite {i}: {symbol} <0x{site:x}> {site_status}")
++ ksft.print_msg(f"\tvmlinux: 0x{vmlinux_insn.address:x}:\t{vmlinux_insn.mnemonic}\t{vmlinux_insn.op_str}")
++ ksft.print_msg(f"\tkcore: 0x{kcore_insn.address:x}:\t{kcore_insn.mnemonic}\t{kcore_insn.op_str}")
++
++ if safe_site:
++ tests_passed += 1
++ passed = True
++ ksft.print_msg(f"\tPASSED: At safe address")
++ continue
++
++ if "jmp" in kcore_insn.mnemonic:
++ passed = True
++ elif "ret" not in kcore_insn.mnemonic:
++ skipped = True
++
++ if passed:
++ ksft.print_msg(f"\tPASSED: Found {kcore_insn.mnemonic} {kcore_insn.op_str}")
++ tests_passed += 1
++ elif skipped:
++ ksft.print_msg(f"\tSKIPPED: Found '{kcore_insn.mnemonic}'")
++ tests_skipped += 1
++ elif unknown:
++ ksft.print_msg(f"UNKNOWN: An unknown instruction: {kcore_insn}")
++ tests_unknown += 1
++ else:
++ ksft.print_msg(f'\t************* FAILED *************')
++ ksft.print_msg(f"\tFound {kcore_insn.mnemonic} {kcore_insn.op_str}")
++ ksft.print_msg(f'\t**********************************')
++ tests_failed += 1
++ except Exception as e:
++ ksft.print_msg(f"UNKNOWN: An unexpected error occurred: {e}")
++ tests_unknown += 1
++
++ksft.print_msg(f"\n\nSummary:")
++ksft.print_msg(f"PASSED: \t{tests_passed} \t/ {total_rethunk_tests}")
++ksft.print_msg(f"FAILED: \t{tests_failed} \t/ {total_rethunk_tests}")
++ksft.print_msg(f"SKIPPED: \t{tests_skipped} \t/ {total_rethunk_tests}")
++ksft.print_msg(f"UNKNOWN: \t{tests_unknown} \t/ {total_rethunk_tests}")
++
++if tests_failed == 0:
++ ksft.test_result_pass("All ITS return thunk sites passed.")
++else:
++ ksft.test_result_fail(f"{tests_failed} failed sites need ITS return thunks.")
++ksft.finished()
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/its_sysfs.py
+@@ -0,0 +1,65 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# Test for Indirect Target Selection(ITS) mitigation sysfs status.
++
++import sys, os, re
++this_dir = os.path.dirname(os.path.realpath(__file__))
++sys.path.insert(0, this_dir + '/../../kselftest')
++import ksft
++
++from common import *
++
++bug = "indirect_target_selection"
++mitigation = get_sysfs(bug)
++
++ITS_MITIGATION_ALIGNED_THUNKS = "Mitigation: Aligned branch/return thunks"
++ITS_MITIGATION_RETPOLINE_STUFF = "Mitigation: Retpolines, Stuffing RSB"
++ITS_MITIGATION_VMEXIT_ONLY = "Mitigation: Vulnerable, KVM: Not affected"
++ITS_MITIGATION_VULNERABLE = "Vulnerable"
++
++def check_mitigation():
++ if mitigation == ITS_MITIGATION_ALIGNED_THUNKS:
++ if cmdline_has(f'{bug}=stuff') and sysfs_has("spectre_v2", "Retpolines"):
++ bug_check_fail(bug, ITS_MITIGATION_ALIGNED_THUNKS, ITS_MITIGATION_RETPOLINE_STUFF)
++ return
++ if cmdline_has(f'{bug}=vmexit') and cpuinfo_has('its_native_only'):
++ bug_check_fail(bug, ITS_MITIGATION_ALIGNED_THUNKS, ITS_MITIGATION_VMEXIT_ONLY)
++ return
++ bug_check_pass(bug, ITS_MITIGATION_ALIGNED_THUNKS)
++ return
++
++ if mitigation == ITS_MITIGATION_RETPOLINE_STUFF:
++ if cmdline_has(f'{bug}=stuff') and sysfs_has("spectre_v2", "Retpolines"):
++ bug_check_pass(bug, ITS_MITIGATION_RETPOLINE_STUFF)
++ return
++ if sysfs_has('retbleed', 'Stuffing'):
++ bug_check_pass(bug, ITS_MITIGATION_RETPOLINE_STUFF)
++ return
++ bug_check_fail(bug, ITS_MITIGATION_RETPOLINE_STUFF, ITS_MITIGATION_ALIGNED_THUNKS)
++
++ if mitigation == ITS_MITIGATION_VMEXIT_ONLY:
++ if cmdline_has(f'{bug}=vmexit') and cpuinfo_has('its_native_only'):
++ bug_check_pass(bug, ITS_MITIGATION_VMEXIT_ONLY)
++ return
++ bug_check_fail(bug, ITS_MITIGATION_VMEXIT_ONLY, ITS_MITIGATION_ALIGNED_THUNKS)
++
++ if mitigation == ITS_MITIGATION_VULNERABLE:
++ if sysfs_has("spectre_v2", "Vulnerable"):
++ bug_check_pass(bug, ITS_MITIGATION_VULNERABLE)
++ else:
++ bug_check_fail(bug, "Mitigation", ITS_MITIGATION_VULNERABLE)
++
++ bug_status_unknown(bug, mitigation)
++ return
++
++ksft.print_header()
++ksft.set_plan(1)
++ksft.print_msg(f'{bug}: {mitigation} ...')
++
++if not basic_checks_sufficient(bug, mitigation):
++ check_mitigation()
++
++ksft.finished()
mm-page_alloc-don-t-steal-single-pages-from-biggest-buddy.patch
mm-page_alloc-speed-up-fallbacks-in-rmqueue_bulk.patch
sched-eevdf-fix-se-slice-being-set-to-u64_max-and-resulting-crash.patch
+arm64-insn-add-support-for-encoding-dsb.patch
+arm64-proton-pack-expose-whether-the-platform-is-mitigated-by-firmware.patch
+arm64-proton-pack-expose-whether-the-branchy-loop-k-value.patch
+arm64-bpf-add-bhb-mitigation-to-the-epilogue-for-cbpf-programs.patch
+arm64-bpf-only-mitigate-cbpf-programs-loaded-by-unprivileged-users.patch
+arm64-proton-pack-add-new-cpus-k-values-for-branch-mitigation.patch
+x86-bpf-call-branch-history-clearing-sequence-on-exit.patch
+x86-bpf-add-ibhf-call-at-end-of-classic-bpf.patch
+x86-bhi-do-not-set-bhi_dis_s-in-32-bit-mode.patch
+x86-speculation-simplify-and-make-call_nospec-consistent.patch
+x86-speculation-add-a-conditional-cs-prefix-to-call_nospec.patch
+x86-speculation-remove-the-extra-ifdef-around-call_nospec.patch
+documentation-x86-bugs-its-add-its-documentation.patch
+x86-its-enumerate-indirect-target-selection-its-bug.patch
+x86-its-add-support-for-its-safe-indirect-thunk.patch
+x86-its-add-support-for-its-safe-return-thunk.patch
+x86-its-enable-indirect-target-selection-mitigation.patch
+x86-its-add-vmexit-option-to-skip-mitigation-on-some-cpus.patch
+x86-its-add-support-for-rsb-stuffing-mitigation.patch
+x86-its-align-rets-in-bhb-clear-sequence-to-avoid-thunking.patch
+x86-ibt-keep-ibt-disabled-during-alternative-patching.patch
+x86-its-use-dynamic-thunks-for-indirect-branches.patch
+selftest-x86-bugs-add-selftests-for-its.patch
--- /dev/null
+From 2f617a2de6c072b2a727bf324f98b4ccab296706 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Mon, 5 May 2025 14:35:12 -0700
+Subject: x86/bhi: Do not set BHI_DIS_S in 32-bit mode
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 073fdbe02c69c43fb7c0d547ec265c7747d4a646 upstream.
+
+With the possibility of intra-mode BHI via cBPF, the complete mitigation for
+BHI is to use the IBHF (history fence) instruction with BHI_DIS_S set. Since
+this new instruction is only available in 64-bit mode, setting BHI_DIS_S in
+32-bit mode is only a partial mitigation.
+
+Do not set BHI_DIS_S in 32-bit mode so as to avoid reporting a misleading
+mitigation status. With this change IBHF won't be used in 32-bit mode; also
+remove the CONFIG_X86_64 check from emit_spectre_bhb_barrier().
+
+Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 6 +++---
+ arch/x86/net/bpf_jit_comp.c | 5 +++--
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1684,11 +1684,11 @@ static void __init bhi_select_mitigation
+ return;
+ }
+
+- /* Mitigate in hardware if supported */
+- if (spec_ctrl_bhi_dis())
++ if (!IS_ENABLED(CONFIG_X86_64))
+ return;
+
+- if (!IS_ENABLED(CONFIG_X86_64))
++ /* Mitigate in hardware if supported */
++ if (spec_ctrl_bhi_dis())
+ return;
+
+ if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1437,8 +1437,7 @@ static int emit_spectre_bhb_barrier(u8 *
+ /* Insert IBHF instruction */
+ if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
+ cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
+- (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW) &&
+- IS_ENABLED(CONFIG_X86_64))) {
++ cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
+ /*
+ * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
+ * fence preventing branch history from before the fence from
+@@ -1448,6 +1447,8 @@ static int emit_spectre_bhb_barrier(u8 *
+ * hardware that doesn't need or support it. The REP and REX.W
+ * prefixes are required by the microcode, and they also ensure
+ * that the NOP is unlikely to be used in existing code.
++ *
++ * IBHF is not a valid instruction in 32-bit mode.
+ */
+ EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
+ }
--- /dev/null
+From 72c3edb1f282d594c786a5bb2afc2bee72f41a2b Mon Sep 17 00:00:00 2001
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Date: Mon, 5 May 2025 14:35:12 -0700
+Subject: x86/bpf: Add IBHF call at end of classic BPF
+
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+
+commit 9f725eec8fc0b39bdc07dcc8897283c367c1a163 upstream.
+
+Classic BPF programs can be run by unprivileged users, allowing
+unprivileged code to execute inside the kernel. Attackers can use this to
+craft branch history in kernel mode that can influence the target of
+indirect branches.
+
+BHI_DIS_S provides user-kernel isolation of branch history, but cBPF can be
+used to bypass this protection by crafting branch history in kernel mode.
+To stop intra-mode attacks via cBPF programs, Intel created a new
+instruction Indirect Branch History Fence (IBHF). IBHF prevents the
+predicted targets of subsequent indirect branches from being influenced by
+branch history prior to the IBHF. IBHF is only effective while BHI_DIS_S is
+enabled.
+
+Add the IBHF instruction to cBPF jitted code's exit path. Add the new fence
+when the hardware mitigation is enabled (i.e., X86_FEATURE_CLEAR_BHB_HW is
+set) or after the software sequence (X86_FEATURE_CLEAR_BHB_LOOP) is being
+used in a virtual machine. Note that X86_FEATURE_CLEAR_BHB_HW and
+X86_FEATURE_CLEAR_BHB_LOOP are mutually exclusive, so the JIT compiler will
+only emit the new fence, not the SW sequence, when X86_FEATURE_CLEAR_BHB_HW
+is set.
+
+Hardware that enumerates BHI_NO basically has BHI_DIS_S protections always
+enabled, regardless of the value of BHI_DIS_S. Since BHI_DIS_S doesn't
+protect against intra-mode attacks, enumerate BHI bug on BHI_NO hardware as
+well.
+
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c | 9 ++++++---
+ arch/x86/net/bpf_jit_comp.c | 19 +++++++++++++++++++
+ 2 files changed, 25 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1437,9 +1437,12 @@ static void __init cpu_set_bug_bits(stru
+ if (vulnerable_to_rfds(x86_arch_cap_msr))
+ setup_force_cpu_bug(X86_BUG_RFDS);
+
+- /* When virtualized, eIBRS could be hidden, assume vulnerable */
+- if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
+- !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
++ /*
++ * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
++ * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
++ * attacks. When virtualized, eIBRS could be hidden, assume vulnerable.
++ */
++ if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
+ boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+ setup_force_cpu_bug(X86_BUG_BHI);
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -41,6 +41,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes,
+ #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
+ #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
+ #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
++#define EMIT5(b1, b2, b3, b4, b5) \
++ do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
+
+ #define EMIT1_off32(b1, off) \
+ do { EMIT1(b1); EMIT(off, 4); } while (0)
+@@ -1432,6 +1434,23 @@ static int emit_spectre_bhb_barrier(u8 *
+ EMIT1(0x59); /* pop rcx */
+ EMIT1(0x58); /* pop rax */
+ }
++ /* Insert IBHF instruction */
++ if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
++ cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
++ (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW) &&
++ IS_ENABLED(CONFIG_X86_64))) {
++ /*
++ * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
++ * fence preventing branch history from before the fence from
++ * affecting indirect branches after the fence. This is
++ * specifically used in cBPF jitted code to prevent Intra-mode
++ * BHI attacks. The IBHF instruction is designed to be a NOP on
++ * hardware that doesn't need or support it. The REP and REX.W
++ * prefixes are required by the microcode, and they also ensure
++ * that the NOP is unlikely to be used in existing code.
++ */
++ EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
++ }
+ *pprog = prog;
+ return 0;
+ }
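
For illustration only, a tiny userspace C sketch of the byte emission that the EMIT5() macro above performs, assuming a plain byte buffer in place of the JIT image (the buffer and helper names here are made up for the example). It shows how the five-byte IBHF encoding is appended to the program; per the comment in the patch, the same bytes behave as a NOP on hardware that does not need or support IBHF.

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* Hypothetical stand-in for the JIT output buffer. */
  static uint8_t prog[64];
  static size_t prog_len;

  static void emit_bytes(const uint8_t *bytes, size_t n)
  {
          memcpy(prog + prog_len, bytes, n);
          prog_len += n;
  }

  int main(void)
  {
          /* REP (0xF3) and REX.W (0x48) prefixes followed by 0F 1E F8. */
          const uint8_t ibhf[] = { 0xF3, 0x48, 0x0F, 0x1E, 0xF8 };

          emit_bytes(ibhf, sizeof(ibhf));

          for (size_t i = 0; i < prog_len; i++)
                  printf("%02x ", (unsigned)prog[i]);
          printf("\n");
          return 0;
  }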
--- /dev/null
+From cf365c56c44494ff952fc8015cae50eed2bc996f Mon Sep 17 00:00:00 2001
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Date: Mon, 5 May 2025 14:35:12 -0700
+Subject: x86/bpf: Call branch history clearing sequence on exit
+
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+
+commit d4e89d212d401672e9cdfe825d947ee3a9fbe3f5 upstream.
+
+Classic BPF programs have been identified as potential vectors for
+intra-mode Branch Target Injection (BTI) attacks. Classic BPF programs can
+be run by unprivileged users. They allow unprivileged code to execute
+inside the kernel. Attackers can use unprivileged cBPF to craft branch
+history in kernel mode that can influence the target of indirect branches.
+
+Introduce a branch history buffer (BHB) clearing sequence during the JIT
+compilation of classic BPF programs. The clearing sequence is the same as
+is used in previous mitigations to protect syscalls. Since eBPF programs
+already have their own mitigations in place, only insert the call on
+classic programs that aren't run by privileged users.
+
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/net/bpf_jit_comp.c | 31 +++++++++++++++++++++++++++++++
+ 1 file changed, 31 insertions(+)
+
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1412,6 +1412,30 @@ static void emit_shiftx(u8 **pprog, u32
+ #define LOAD_TAIL_CALL_CNT_PTR(stack) \
+ __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
+
++static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
++ struct bpf_prog *bpf_prog)
++{
++ u8 *prog = *pprog;
++ u8 *func;
++
++ if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
++ /* The clearing sequence clobbers eax and ecx. */
++ EMIT1(0x50); /* push rax */
++ EMIT1(0x51); /* push rcx */
++ ip += 2;
++
++ func = (u8 *)clear_bhb_loop;
++ ip += x86_call_depth_emit_accounting(&prog, func, ip);
++
++ if (emit_call(&prog, func, ip))
++ return -EINVAL;
++ EMIT1(0x59); /* pop rcx */
++ EMIT1(0x58); /* pop rax */
++ }
++ *pprog = prog;
++ return 0;
++}
++
+ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
+ int oldproglen, struct jit_context *ctx, bool jmp_padding)
+ {
+@@ -2402,6 +2426,13 @@ emit_jmp:
+ seen_exit = true;
+ /* Update cleanup_addr */
+ ctx->cleanup_addr = proglen;
++ if (bpf_prog_was_classic(bpf_prog) &&
++ !capable(CAP_SYS_ADMIN)) {
++ u8 *ip = image + addrs[i - 1];
++
++ if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
++ return -EINVAL;
++ }
+ if (bpf_prog->aux->exception_boundary) {
+ pop_callee_regs(&prog, all_callee_regs_used);
+ pop_r12(&prog);
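
A condensed sketch, in standalone C, of the gating added on the JIT exit path above. bpf_prog_was_classic(), the capability check and the emitter are placeholders here, not the kernel implementations; the point is only that the barrier is emitted for classic programs loaded without CAP_SYS_ADMIN, since eBPF and privileged programs already have their own mitigations.

  #include <stdbool.h>
  #include <stdio.h>

  /* Placeholder stand-ins for the kernel helpers used in the hunk above. */
  static bool bpf_prog_was_classic(void) { return true; }  /* cBPF, e.g. seccomp */
  static bool loader_has_cap_sys_admin(void) { return false; }

  static int emit_spectre_bhb_barrier(void)
  {
          /* In the JIT this is: push rax/rcx, call clear_bhb_loop, pop rcx/rax. */
          printf("emit: push rax; push rcx; call clear_bhb_loop; pop rcx; pop rax\n");
          return 0;
  }

  int main(void)
  {
          if (bpf_prog_was_classic() && !loader_has_cap_sys_admin()) {
                  if (emit_spectre_bhb_barrier())
                          return 1;
          }
          return 0;
  }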
--- /dev/null
+From 1abd15e72d84b0f66a5bc94d39f7138027cb75fc Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Sat, 3 May 2025 09:46:31 -0700
+Subject: x86/ibt: Keep IBT disabled during alternative patching
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit ebebe30794d38c51f71fe4951ba6af4159d9837d upstream.
+
+cfi_rewrite_callers() updates the fineIBT hash matching at the caller side,
+but except for paranoid-mode it relies on apply_retpoline() and friends for
+any ENDBR relocation. This could temporarily cause an indirect branch to
+land on a poisoned ENDBR.
+
+For instance, with para-virtualization enabled, a simple wrmsrl() could
+have an indirect branch pointing to native_write_msr() whose ENDBR has been
+relocated due to fineIBT:
+
+<wrmsrl>:
+ push %rbp
+ mov %rsp,%rbp
+ mov %esi,%eax
+ mov %rsi,%rdx
+ shr $0x20,%rdx
+ mov %edi,%edi
+ mov %rax,%rsi
+ call *0x21e65d0(%rip) # <pv_ops+0xb8>
+ ^^^^^^^^^^^^^^^^^^^^^^^
+
+Such an indirect call during the alternative patching could #CP if the
+caller is not *yet* adjusted for the new target ENDBR. To prevent a false
+ #CP, keep CET-IBT disabled until all callers are patched.
+
+Patching during the module load does not need to be guarded by IBT-disable
+because the module code is not executed until the patching is complete.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/alternative.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -31,6 +31,7 @@
+ #include <asm/paravirt.h>
+ #include <asm/asm-prototypes.h>
+ #include <asm/cfi.h>
++#include <asm/ibt.h>
+
+ int __read_mostly alternatives_patched;
+
+@@ -1719,6 +1720,8 @@ static noinline void __init alt_reloc_se
+
+ void __init alternative_instructions(void)
+ {
++ u64 ibt;
++
+ int3_selftest();
+
+ /*
+@@ -1745,6 +1748,9 @@ void __init alternative_instructions(voi
+ */
+ paravirt_set_cap();
+
++ /* Keep CET-IBT disabled until caller/callee are patched */
++ ibt = ibt_save(/*disable*/ true);
++
+ __apply_fineibt(__retpoline_sites, __retpoline_sites_end,
+ __cfi_sites, __cfi_sites_end, true);
+
+@@ -1768,6 +1774,8 @@ void __init alternative_instructions(voi
+ */
+ apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
+
++ ibt_restore(ibt);
++
+ #ifdef CONFIG_SMP
+ /* Patch to UP if other cpus not imminent. */
+ if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
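
A rough sketch of the save/patch/restore bracketing this hunk adds, assuming stub ibt_save()/ibt_restore() helpers in place of the real MSR_IA32_S_CET accessors. It only illustrates the shape of the fix: ENDBR enforcement is off for the whole patching window and the previous state is restored afterwards.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Stubs modelling the CET-IBT state; the real code touches MSR_IA32_S_CET. */
  static uint64_t ibt_state = 1;              /* pretend IBT is currently on */

  static uint64_t ibt_save(bool disable)
  {
          uint64_t old = ibt_state;

          if (disable)
                  ibt_state = 0;              /* ENDBR checks suspended */
          return old;
  }

  static void ibt_restore(uint64_t saved)
  {
          ibt_state = saved;
  }

  int main(void)
  {
          uint64_t ibt = ibt_save(true);

          /* ... __apply_fineibt(), apply_retpolines(), apply_returns() ... */
          printf("patching with IBT state = %llu\n", (unsigned long long)ibt_state);

          ibt_restore(ibt);
          printf("IBT state restored to %llu\n", (unsigned long long)ibt_state);
          return 0;
  }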
--- /dev/null
+From 2ab5a9b085907cafdb4a9ec2856acd1e7ee181c9 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 21 Jun 2024 21:17:21 -0700
+Subject: x86/its: Add support for ITS-safe indirect thunk
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 8754e67ad4ac692c67ff1f99c0d07156f04ae40c upstream.
+
+Due to ITS, indirect branches in the lower half of a cacheline may be
+vulnerable to branch target injection attack.
+
+Introduce ITS-safe thunks to patch indirect branches in the lower half of
+cacheline with the thunk. Also thunk any eBPF generated indirect branches
+in emit_indirect_jump().
+
+The below categories of indirect branches are not mitigated:
+
+- Indirect branches in the .init section are not mitigated because they are
+ discarded after boot.
+- Indirect branches that are explicitly marked retpoline-safe.
+
+Note that retpoline also mitigates the indirect branches against ITS. This
+is because the retpoline sequence fills an RSB entry before RET, and it
+does not suffer from the RSB-underflow part of ITS.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig | 11 ++++++++
+ arch/x86/include/asm/cpufeatures.h | 1
+ arch/x86/include/asm/nospec-branch.h | 4 +++
+ arch/x86/kernel/alternative.c | 45 ++++++++++++++++++++++++++++++++---
+ arch/x86/kernel/vmlinux.lds.S | 6 ++++
+ arch/x86/lib/retpoline.S | 28 +++++++++++++++++++++
+ arch/x86/net/bpf_jit_comp.c | 5 +++
+ 7 files changed, 96 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2747,6 +2747,17 @@ config MITIGATION_SSB
+ of speculative execution in a similar way to the Meltdown and Spectre
+ security vulnerabilities.
+
++config MITIGATION_ITS
++ bool "Enable Indirect Target Selection mitigation"
++ depends on CPU_SUP_INTEL && X86_64
++ depends on MITIGATION_RETPOLINE && MITIGATION_RETHUNK
++ default y
++ help
++ Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in
++ BPU on some Intel CPUs that may allow Spectre V2 style attacks. If
++ disabled, mitigation cannot be enabled via cmdline.
++ See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
++
+ endif
+
+ config ARCH_HAS_ADD_PAGES
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -475,6 +475,7 @@
+ #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
+ #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
+ #define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
++#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 6) /* Use thunk for indirect branches in lower half of cacheline */
+
+ /*
+ * BUG word(s)
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -355,10 +355,14 @@
+ ".long 999b\n\t" \
+ ".popsection\n\t"
+
++#define ITS_THUNK_SIZE 64
++
+ typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
++typedef u8 its_thunk_t[ITS_THUNK_SIZE];
+ extern retpoline_thunk_t __x86_indirect_thunk_array[];
+ extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
+ extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
++extern its_thunk_t __x86_indirect_its_thunk_array[];
+
+ #ifdef CONFIG_MITIGATION_RETHUNK
+ extern void __x86_return_thunk(void);
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -581,7 +581,8 @@ static int emit_indirect(int op, int reg
+ return i;
+ }
+
+-static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
++static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes,
++ void *call_dest, void *jmp_dest)
+ {
+ u8 op = insn->opcode.bytes[0];
+ int i = 0;
+@@ -602,7 +603,7 @@ static int emit_call_track_retpoline(voi
+ switch (op) {
+ case CALL_INSN_OPCODE:
+ __text_gen_insn(bytes+i, op, addr+i,
+- __x86_indirect_call_thunk_array[reg],
++ call_dest,
+ CALL_INSN_SIZE);
+ i += CALL_INSN_SIZE;
+ break;
+@@ -610,7 +611,7 @@ static int emit_call_track_retpoline(voi
+ case JMP32_INSN_OPCODE:
+ clang_jcc:
+ __text_gen_insn(bytes+i, op, addr+i,
+- __x86_indirect_jump_thunk_array[reg],
++ jmp_dest,
+ JMP32_INSN_SIZE);
+ i += JMP32_INSN_SIZE;
+ break;
+@@ -625,6 +626,35 @@ clang_jcc:
+ return i;
+ }
+
++static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
++{
++ return __emit_trampoline(addr, insn, bytes,
++ __x86_indirect_call_thunk_array[reg],
++ __x86_indirect_jump_thunk_array[reg]);
++}
++
++#ifdef CONFIG_MITIGATION_ITS
++static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
++{
++ return __emit_trampoline(addr, insn, bytes,
++ __x86_indirect_its_thunk_array[reg],
++ __x86_indirect_its_thunk_array[reg]);
++}
++
++/* Check if an indirect branch is at ITS-unsafe address */
++static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
++{
++ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
++ return false;
++
++ /* Indirect branch opcode is 2 or 3 bytes depending on reg */
++ addr += 1 + reg / 8;
++
++ /* Lower-half of the cacheline? */
++ return !(addr & 0x20);
++}
++#endif
++
+ /*
+ * Rewrite the compiler generated retpoline thunk calls.
+ *
+@@ -699,6 +729,15 @@ static int patch_retpoline(void *addr, s
+ bytes[i++] = 0xe8; /* LFENCE */
+ }
+
++#ifdef CONFIG_MITIGATION_ITS
++ /*
++ * Check if the address of last byte of emitted-indirect is in
++ * lower-half of the cacheline. Such branches need ITS mitigation.
++ */
++ if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg))
++ return emit_its_trampoline(addr, insn, reg, bytes);
++#endif
++
+ ret = emit_indirect(op, reg, bytes + i);
+ if (ret < 0)
+ return ret;
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -530,4 +530,10 @@ INIT_PER_CPU(irq_stack_backing_store);
+ "SRSO function pair won't alias");
+ #endif
+
++#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
++. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline");
++. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
++. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
++#endif
++
+ #endif /* CONFIG_X86_64 */
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -366,6 +366,34 @@ SYM_FUNC_END(call_depth_return_thunk)
+
+ #endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
+
++#ifdef CONFIG_MITIGATION_ITS
++
++.macro ITS_THUNK reg
++
++SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
++ UNWIND_HINT_UNDEFINED
++ ANNOTATE_NOENDBR
++ ANNOTATE_RETPOLINE_SAFE
++ jmp *%\reg
++ int3
++ .align 32, 0xcc /* fill to the end of the line */
++ .skip 32, 0xcc /* skip to the next upper half */
++.endm
++
++/* ITS mitigation requires thunks be aligned to upper half of cacheline */
++.align 64, 0xcc
++.skip 32, 0xcc
++SYM_CODE_START(__x86_indirect_its_thunk_array)
++
++#define GEN(reg) ITS_THUNK reg
++#include <asm/GEN-for-each-reg.h>
++#undef GEN
++
++ .align 64, 0xcc
++SYM_CODE_END(__x86_indirect_its_thunk_array)
++
++#endif
++
+ /*
+ * This function name is magical and is used by -mfunction-return=thunk-extern
+ * for the compiler to generate JMPs to it.
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -639,7 +639,10 @@ static void emit_indirect_jump(u8 **ppro
+ {
+ u8 *prog = *pprog;
+
+- if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
++ if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
++ OPTIMIZER_HIDE_VAR(reg);
++ emit_jump(&prog, &__x86_indirect_its_thunk_array[reg], ip);
++ } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
+ EMIT_LFENCE();
+ EMIT2(0xFF, 0xE0 + reg);
+ } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
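
A self-contained illustration of the cacheline-half test performed by cpu_wants_indirect_its_thunk_at() above. The opcode length (2 bytes, or 3 with a REX prefix for r8-r15) is added to the branch address, and bit 5 of the result tells whether the last byte of the indirect branch lands in the lower 32 bytes of its 64-byte cacheline; the addresses below are invented for the demonstration.

  #include <stdbool.h>
  #include <stdio.h>

  /* Lower half of a 64-byte cacheline means bit 5 of the address is clear. */
  static bool wants_its_thunk_at(unsigned long addr, int reg)
  {
          /* Indirect branch opcode is 2 or 3 bytes depending on reg. */
          addr += 1 + reg / 8;

          return !(addr & 0x20);
  }

  int main(void)
  {
          unsigned long lower = 0xffffffff81000010UL; /* offset 0x10 in its line */
          unsigned long upper = 0xffffffff81000030UL; /* offset 0x30 in its line */

          printf("branch at 0x%lx, reg 0: thunk needed = %d\n",
                 lower, wants_its_thunk_at(lower, 0));
          printf("branch at 0x%lx, reg 8: thunk needed = %d\n",
                 upper, wants_its_thunk_at(upper, 8));
          return 0;
  }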
--- /dev/null
+From 14b308224d1da48916602d17fdec99fe41937bab Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 21 Jun 2024 21:17:21 -0700
+Subject: x86/its: Add support for ITS-safe return thunk
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit a75bf27fe41abe658c53276a0c486c4bf9adecfc upstream.
+
+RETs in the lower half of a cacheline may be affected by the ITS bug,
+specifically when the RSB underflows. Use the ITS-safe return thunk for such
+RETs.
+
+RETs that are not patched:
+
+- RET in retpoline sequence does not need to be patched, because the
+ sequence itself fills an RSB before RET.
+- RET in Call Depth Tracking (CDT) thunks __x86_indirect_{call|jump}_thunk
+ and call_depth_return_thunk are not patched because CDT by design
+ prevents RSB-underflow.
+- RETs in .init section are not reachable after init.
+- RETs that are explicitly marked safe with ANNOTATE_UNRET_SAFE.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/alternative.h | 14 ++++++++++++++
+ arch/x86/include/asm/nospec-branch.h | 6 ++++++
+ arch/x86/kernel/alternative.c | 19 +++++++++++++++++--
+ arch/x86/kernel/ftrace.c | 2 +-
+ arch/x86/kernel/static_call.c | 4 ++--
+ arch/x86/kernel/vmlinux.lds.S | 4 ++++
+ arch/x86/lib/retpoline.S | 13 ++++++++++++-
+ arch/x86/net/bpf_jit_comp.c | 2 +-
+ 8 files changed, 57 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -134,6 +134,20 @@ static __always_inline int x86_call_dept
+ }
+ #endif
+
++#if defined(CONFIG_MITIGATION_RETHUNK) && defined(CONFIG_OBJTOOL)
++extern bool cpu_wants_rethunk(void);
++extern bool cpu_wants_rethunk_at(void *addr);
++#else
++static __always_inline bool cpu_wants_rethunk(void)
++{
++ return false;
++}
++static __always_inline bool cpu_wants_rethunk_at(void *addr)
++{
++ return false;
++}
++#endif
++
+ #ifdef CONFIG_SMP
+ extern void alternatives_smp_module_add(struct module *mod, char *name,
+ void *locks, void *locks_end,
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -386,6 +386,12 @@ static inline void srso_return_thunk(voi
+ static inline void srso_alias_return_thunk(void) {}
+ #endif
+
++#ifdef CONFIG_MITIGATION_ITS
++extern void its_return_thunk(void);
++#else
++static inline void its_return_thunk(void) {}
++#endif
++
+ extern void retbleed_return_thunk(void);
+ extern void srso_return_thunk(void);
+ extern void srso_alias_return_thunk(void);
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -809,6 +809,21 @@ void __init_or_module noinline apply_ret
+
+ #ifdef CONFIG_MITIGATION_RETHUNK
+
++bool cpu_wants_rethunk(void)
++{
++ return cpu_feature_enabled(X86_FEATURE_RETHUNK);
++}
++
++bool cpu_wants_rethunk_at(void *addr)
++{
++ if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
++ return false;
++ if (x86_return_thunk != its_return_thunk)
++ return true;
++
++ return !((unsigned long)addr & 0x20);
++}
++
+ /*
+ * Rewrite the compiler generated return thunk tail-calls.
+ *
+@@ -825,7 +840,7 @@ static int patch_return(void *addr, stru
+ int i = 0;
+
+ /* Patch the custom return thunks... */
+- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
++ if (cpu_wants_rethunk_at(addr)) {
+ i = JMP32_INSN_SIZE;
+ __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
+ } else {
+@@ -842,7 +857,7 @@ void __init_or_module noinline apply_ret
+ {
+ s32 *s;
+
+- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++ if (cpu_wants_rethunk())
+ static_call_force_reinit();
+
+ for (s = start; s < end; s++) {
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -354,7 +354,7 @@ create_trampoline(struct ftrace_ops *ops
+ goto fail;
+
+ ip = trampoline + size;
+- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++ if (cpu_wants_rethunk_at(ip))
+ __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
+ else
+ memcpy(ip, retq, sizeof(retq));
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -81,7 +81,7 @@ static void __ref __static_call_transfor
+ break;
+
+ case RET:
+- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++ if (cpu_wants_rethunk_at(insn))
+ code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
+ else
+ code = &retinsn;
+@@ -90,7 +90,7 @@ static void __ref __static_call_transfor
+ case JCC:
+ if (!func) {
+ func = __static_call_return;
+- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++ if (cpu_wants_rethunk())
+ func = x86_return_thunk;
+ }
+
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -536,4 +536,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+ . = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
+ #endif
+
++#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
++. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
++#endif
++
+ #endif /* CONFIG_X86_64 */
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -392,7 +392,18 @@ SYM_CODE_START(__x86_indirect_its_thunk_
+ .align 64, 0xcc
+ SYM_CODE_END(__x86_indirect_its_thunk_array)
+
+-#endif
++.align 64, 0xcc
++.skip 32, 0xcc
++SYM_CODE_START(its_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
++ ANNOTATE_UNRET_SAFE
++ ret
++ int3
++SYM_CODE_END(its_return_thunk)
++EXPORT_SYMBOL(its_return_thunk)
++
++#endif /* CONFIG_MITIGATION_ITS */
+
+ /*
+ * This function name is magical and is used by -mfunction-return=thunk-extern
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -664,7 +664,7 @@ static void emit_return(u8 **pprog, u8 *
+ {
+ u8 *prog = *pprog;
+
+- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
++ if (cpu_wants_rethunk()) {
+ emit_jump(&prog, x86_return_thunk, ip);
+ } else {
+ EMIT1(0xC3); /* ret */
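
A standalone sketch of the cpu_wants_rethunk_at() decision introduced above, with the feature flag and thunk pointers reduced to plain booleans. When the active return thunk is its_return_thunk, only RETs whose address sits in the lower half of a cacheline (bit 5 clear) are rewritten to a jump; upper-half RETs keep the bare RET.

  #include <stdbool.h>
  #include <stdio.h>

  static bool feature_rethunk = true;        /* X86_FEATURE_RETHUNK */
  static bool using_its_return_thunk = true; /* x86_return_thunk == its_return_thunk */

  static bool wants_rethunk_at(unsigned long addr)
  {
          if (!feature_rethunk)
                  return false;
          if (!using_its_return_thunk)
                  return true;               /* other thunks patch every RET */

          return !(addr & 0x20);             /* only lower-half RETs need it */
  }

  int main(void)
  {
          printf("RET at offset 0x08: patch = %d\n",
                 wants_rethunk_at(0xffffffff81002008UL));
          printf("RET at offset 0x28: patch = %d\n",
                 wants_rethunk_at(0xffffffff81002028UL));
          return 0;
  }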
--- /dev/null
+From 772e3b0936ff4a8574b7bd2a04a04003e7b8d0d3 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Mon, 2 Dec 2024 12:07:08 -0800
+Subject: x86/its: Add support for RSB stuffing mitigation
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit facd226f7e0c8ca936ac114aba43cb3e8b94e41e upstream.
+
+When retpoline mitigation is enabled for spectre-v2, enabling
+call-depth-tracking and RSB stuffing also mitigates ITS. Add cmdline option
+indirect_target_selection=stuff to allow enabling RSB stuffing mitigation.
+
+When the retpoline mitigation is not enabled, the =stuff option is ignored,
+and the default mitigation for ITS is deployed.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 3 +++
+ arch/x86/kernel/cpu/bugs.c | 19 +++++++++++++++++++
+ 2 files changed, 22 insertions(+)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2159,6 +2159,9 @@
+ mitigation.
+ vmexit: Only deploy mitigation if CPU is affected by
+ guest/host isolation part of ITS.
++ stuff: Deploy RSB-fill mitigation when retpoline is
++ also deployed. Otherwise, deploy the default
++ mitigation.
+
+ For details see:
+ Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1190,6 +1190,7 @@ enum its_mitigation_cmd {
+ ITS_CMD_OFF,
+ ITS_CMD_ON,
+ ITS_CMD_VMEXIT,
++ ITS_CMD_RSB_STUFF,
+ };
+
+ enum its_mitigation {
+@@ -1230,6 +1231,8 @@ static int __init its_parse_cmdline(char
+ setup_force_cpu_bug(X86_BUG_ITS);
+ } else if (!strcmp(str, "vmexit")) {
+ its_cmd = ITS_CMD_VMEXIT;
++ } else if (!strcmp(str, "stuff")) {
++ its_cmd = ITS_CMD_RSB_STUFF;
+ } else {
+ pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
+ }
+@@ -1281,6 +1284,12 @@ static void __init its_select_mitigation
+ goto out;
+ }
+
++ if (cmd == ITS_CMD_RSB_STUFF &&
++ (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))) {
++ pr_err("RSB stuff mitigation not supported, using default\n");
++ cmd = ITS_CMD_ON;
++ }
++
+ switch (cmd) {
+ case ITS_CMD_OFF:
+ its_mitigation = ITS_MITIGATION_OFF;
+@@ -1298,6 +1307,16 @@ static void __init its_select_mitigation
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ set_return_thunk(its_return_thunk);
+ break;
++ case ITS_CMD_RSB_STUFF:
++ its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
++ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++ setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
++ set_return_thunk(call_depth_return_thunk);
++ if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
++ retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
++ pr_info("Retbleed mitigation updated to stuffing\n");
++ }
++ break;
+ }
+ out:
+ pr_info("%s\n", its_strings[its_mitigation]);
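
A minimal sketch of the =stuff fallback added above, with the boot-time state reduced to booleans (the values are examples, not a real configuration): RSB stuffing is only honoured when retpoline is in use and call-depth tracking is built in; otherwise the request falls back to the default aligned-thunks mitigation, as the pr_err() in the hunk reports.

  #include <stdbool.h>
  #include <stdio.h>

  enum its_cmd { ITS_CMD_ON, ITS_CMD_RSB_STUFF };

  int main(void)
  {
          bool have_retpoline = false;          /* e.g. spectre_v2 chose eIBRS */
          bool have_call_depth_tracking = true; /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
          enum its_cmd cmd = ITS_CMD_RSB_STUFF; /* indirect_target_selection=stuff */

          if (cmd == ITS_CMD_RSB_STUFF &&
              (!have_retpoline || !have_call_depth_tracking)) {
                  printf("RSB stuff mitigation not supported, using default\n");
                  cmd = ITS_CMD_ON;
          }

          printf("selected: %s\n",
                 cmd == ITS_CMD_RSB_STUFF ? "Retpolines, Stuffing RSB"
                                          : "Aligned branch/return thunks");
          return 0;
  }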
--- /dev/null
+From 63ee8b0a9f911692c4a8736594ae7d8bb97e4ed0 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Mon, 18 Nov 2024 09:53:12 -0800
+Subject: x86/its: Add "vmexit" option to skip mitigation on some CPUs
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 2665281a07e19550944e8354a2024635a7b2714a upstream.
+
+Ice Lake generation CPUs are not affected by the guest/host isolation part
+of ITS. If a user is only concerned about KVM guests, they can now choose a
+new cmdline option "vmexit" that will not deploy the ITS mitigation when the
+CPU is not affected by guest/host isolation. This saves the performance
+overhead of the ITS mitigation on Ice Lake gen CPUs.
+
+When the "vmexit" option is selected and the CPU is affected by the ITS
+guest/host isolation, the default ITS mitigation is deployed.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 2 ++
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 11 +++++++++++
+ arch/x86/kernel/cpu/common.c | 19 ++++++++++++-------
+ 4 files changed, 26 insertions(+), 7 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2157,6 +2157,8 @@
+ off: Disable mitigation.
+ force: Force the ITS bug and deploy default
+ mitigation.
++ vmexit: Only deploy mitigation if CPU is affected by
++ guest/host isolation part of ITS.
+
+ For details see:
+ Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -528,4 +528,5 @@
+ #define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
+ #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+ #define X86_BUG_ITS X86_BUG(1*32 + 5) /* "its" CPU is affected by Indirect Target Selection */
++#define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1189,16 +1189,19 @@ do_cmd_auto:
+ enum its_mitigation_cmd {
+ ITS_CMD_OFF,
+ ITS_CMD_ON,
++ ITS_CMD_VMEXIT,
+ };
+
+ enum its_mitigation {
+ ITS_MITIGATION_OFF,
++ ITS_MITIGATION_VMEXIT_ONLY,
+ ITS_MITIGATION_ALIGNED_THUNKS,
+ ITS_MITIGATION_RETPOLINE_STUFF,
+ };
+
+ static const char * const its_strings[] = {
+ [ITS_MITIGATION_OFF] = "Vulnerable",
++ [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
+ [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
+ [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
+ };
+@@ -1225,6 +1228,8 @@ static int __init its_parse_cmdline(char
+ } else if (!strcmp(str, "force")) {
+ its_cmd = ITS_CMD_ON;
+ setup_force_cpu_bug(X86_BUG_ITS);
++ } else if (!strcmp(str, "vmexit")) {
++ its_cmd = ITS_CMD_VMEXIT;
+ } else {
+ pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
+ }
+@@ -1280,6 +1285,12 @@ static void __init its_select_mitigation
+ case ITS_CMD_OFF:
+ its_mitigation = ITS_MITIGATION_OFF;
+ break;
++ case ITS_CMD_VMEXIT:
++ if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
++ its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
++ goto out;
++ }
++ fallthrough;
+ case ITS_CMD_ON:
+ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+ if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1230,6 +1230,8 @@ static const __initconst struct x86_cpu_
+ #define RFDS BIT(7)
+ /* CPU is affected by Indirect Target Selection */
+ #define ITS BIT(8)
++/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
++#define ITS_NATIVE_ONLY BIT(9)
+
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
+@@ -1250,16 +1252,16 @@ static const struct x86_cpu_id cpu_vuln_
+ VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE, X86_STEPPINGS(0x0, 0xc), MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS),
+ VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
+- VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+- VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS | ITS),
+- VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
++ VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
++ VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+ VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED | ITS),
+ VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+- VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L, X86_STEPPING_ANY, GDS | ITS),
+- VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE, X86_STEPPING_ANY, GDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY),
++ VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
+- VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE, X86_STEPPING_ANY, RFDS),
+@@ -1481,8 +1483,11 @@ static void __init cpu_set_bug_bits(stru
+ if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
+ setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
+
+- if (vulnerable_to_its(x86_arch_cap_msr))
++ if (vulnerable_to_its(x86_arch_cap_msr)) {
+ setup_force_cpu_bug(X86_BUG_ITS);
++ if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
++ setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
++ }
+
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ return;
--- /dev/null
+From 4c0e84d98fd9c667b049778212b051884d0bb2d8 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 2 May 2025 06:25:19 -0700
+Subject: x86/its: Align RETs in BHB clear sequence to avoid thunking
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit f0cd7091cc5a032c8870b4285305d9172569d126 upstream.
+
+The software mitigation for BHI is to execute the BHB clear sequence at
+syscall entry, and possibly after a cBPF program. The ITS mitigation thunks
+RETs in the lower half of the cacheline. This causes the RETs in the BHB
+clear sequence to be thunked as well, adding unnecessary branches to the
+BHB clear sequence.
+
+Since the sequence is in the hot path, align the RET instructions in the
+sequence to avoid thunking.
+
+This is how the disassembly of clear_bhb_loop() looks after this change:
+
+ 0x44 <+4>: mov $0x5,%ecx
+ 0x49 <+9>: call 0xffffffff81001d9b <clear_bhb_loop+91>
+ 0x4e <+14>: jmp 0xffffffff81001de5 <clear_bhb_loop+165>
+ 0x53 <+19>: int3
+ ...
+ 0x9b <+91>: call 0xffffffff81001dce <clear_bhb_loop+142>
+ 0xa0 <+96>: ret
+ 0xa1 <+97>: int3
+ ...
+ 0xce <+142>: mov $0x5,%eax
+ 0xd3 <+147>: jmp 0xffffffff81001dd6 <clear_bhb_loop+150>
+ 0xd5 <+149>: nop
+ 0xd6 <+150>: sub $0x1,%eax
+ 0xd9 <+153>: jne 0xffffffff81001dd3 <clear_bhb_loop+147>
+ 0xdb <+155>: sub $0x1,%ecx
+ 0xde <+158>: jne 0xffffffff81001d9b <clear_bhb_loop+91>
+ 0xe0 <+160>: ret
+ 0xe1 <+161>: int3
+ 0xe2 <+162>: int3
+ 0xe3 <+163>: int3
+ 0xe4 <+164>: int3
+ 0xe5 <+165>: lfence
+ 0xe8 <+168>: pop %rbp
+ 0xe9 <+169>: ret
+
+Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/entry_64.S | 20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -1524,7 +1524,9 @@ SYM_CODE_END(rewind_stack_and_make_dead)
+ * ORC to unwind properly.
+ *
+ * The alignment is for performance and not for safety, and may be safely
+- * refactored in the future if needed.
++ * refactored in the future if needed. The .skips are for safety, to ensure
++ * that all RETs are in the second half of a cacheline to mitigate Indirect
++ * Target Selection, rather than taking the slowpath via its_return_thunk.
+ */
+ SYM_FUNC_START(clear_bhb_loop)
+ push %rbp
+@@ -1534,10 +1536,22 @@ SYM_FUNC_START(clear_bhb_loop)
+ call 1f
+ jmp 5f
+ .align 64, 0xcc
++ /*
++ * Shift instructions so that the RET is in the upper half of the
++ * cacheline and don't take the slowpath to its_return_thunk.
++ */
++ .skip 32 - (.Lret1 - 1f), 0xcc
+ ANNOTATE_INTRA_FUNCTION_CALL
+ 1: call 2f
+- RET
++.Lret1: RET
+ .align 64, 0xcc
++ /*
++ * As above shift instructions for RET at .Lret2 as well.
++ *
++ * This should be ideally be: .skip 32 - (.Lret2 - 2f), 0xcc
++ * but some Clang versions (e.g. 18) don't like this.
++ */
++ .skip 32 - 18, 0xcc
+ 2: movl $5, %eax
+ 3: jmp 4f
+ nop
+@@ -1545,7 +1559,7 @@ SYM_FUNC_START(clear_bhb_loop)
+ jnz 3b
+ sub $1, %ecx
+ jnz 1b
+- RET
++.Lret2: RET
+ 5: lfence
+ pop %rbp
+ RET
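
A small worked example of the .skip arithmetic above, with the instruction sizes written out explicitly (taken from the disassembly in the changelog; treat the byte counts as illustrative). The padding is chosen so that each RET begins at byte 32 of its 64-byte-aligned block, i.e. in the upper half of the cacheline, so the ITS logic leaves it as a plain RET instead of routing it through its_return_thunk.

  #include <stdio.h>

  int main(void)
  {
          /* First block: "1: call 2f" is 5 bytes, then ".Lret1: RET". */
          int body1 = 5;

          printf(".skip %d, 0xcc -> RET at offset %d of the block\n",
                 32 - body1, (32 - body1) + body1);

          /*
           * Second block: movl(5) + jmp(2) + nop(1) + sub(3) + jne(2) +
           * sub(3) + jne(2) = 18 bytes, which is why the assembly hardcodes
           * ".skip 32 - 18" (some Clang versions reject the label expression).
           */
          int body2 = 5 + 2 + 1 + 3 + 2 + 3 + 2;

          printf(".skip %d, 0xcc -> RET at offset %d of the block\n",
                 32 - body2, (32 - body2) + body2);
          return 0;
  }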
--- /dev/null
+From 223b9e240673c5907d3030837fe33a6ac45206fb Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 21 Jun 2024 20:23:23 -0700
+Subject: x86/its: Enable Indirect Target Selection mitigation
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit f4818881c47fd91fcb6d62373c57c7844e3de1c0 upstream.
+
+Indirect Target Selection (ITS) is a bug in some pre-ADL Intel CPUs with
+eIBRS. It affects the prediction of indirect branches and RETs in the
+lower half of a cacheline. Due to ITS, such branches may get wrongly
+predicted to the target of a (direct or indirect) branch that is located
+in the upper half of the cacheline.
+
+Scope of impact
+===============
+
+Guest/host isolation
+--------------------
+When eIBRS is used for guest/host isolation, the indirect branches in the
+VMM may still be predicted with targets corresponding to branches in the
+guest.
+
+Intra-mode
+----------
+cBPF or other native gadgets can be used for intra-mode training and
+disclosure using ITS.
+
+User/kernel isolation
+---------------------
+When eIBRS is enabled user/kernel isolation is not impacted.
+
+Indirect Branch Prediction Barrier (IBPB)
+-----------------------------------------
+After an IBPB, indirect branches may be predicted with targets
+corresponding to direct branches which were executed prior to IBPB. This is
+mitigated by a microcode update.
+
+Add the cmdline parameter indirect_target_selection=off|on|force to control
+the mitigation, which relocates the affected branches to an ITS-safe thunk,
+i.e. one located in the upper half of the cacheline. Also add sysfs
+reporting.
+
+When the retpoline mitigation is deployed, ITS-safe thunks are not needed,
+because the retpoline sequence is already ITS-safe. Similarly, when the call
+depth tracking (CDT) mitigation is deployed (retbleed=stuff), the ITS-safe
+return thunk is not used, as CDT prevents RSB underflow.
+
+To not overcomplicate things, the ITS mitigation is not supported with the
+spectre-v2 lfence;jmp mitigation. Moreover, it is less practical to deploy
+the lfence;jmp mitigation on ITS-affected parts anyway.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 1
+ Documentation/admin-guide/kernel-parameters.txt | 13 +
+ arch/x86/kernel/cpu/bugs.c | 140 ++++++++++++++++++++-
+ drivers/base/cpu.c | 3
+ include/linux/cpu.h | 2
+ 5 files changed, 155 insertions(+), 4 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -511,6 +511,7 @@ Description: information about CPUs hete
+
+ What: /sys/devices/system/cpu/vulnerabilities
+ /sys/devices/system/cpu/vulnerabilities/gather_data_sampling
++ /sys/devices/system/cpu/vulnerabilities/indirect_target_selection
+ /sys/devices/system/cpu/vulnerabilities/itlb_multihit
+ /sys/devices/system/cpu/vulnerabilities/l1tf
+ /sys/devices/system/cpu/vulnerabilities/mds
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2149,6 +2149,18 @@
+ different crypto accelerators. This option can be used
+ to achieve best performance for particular HW.
+
++ indirect_target_selection= [X86,Intel] Mitigation control for Indirect
++ Target Selection(ITS) bug in Intel CPUs. Updated
++ microcode is also required for a fix in IBPB.
++
++ on: Enable mitigation (default).
++ off: Disable mitigation.
++ force: Force the ITS bug and deploy default
++ mitigation.
++
++ For details see:
++ Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
++
+ init= [KNL]
+ Format: <full_path>
+ Run specified binary instead of /sbin/init as init
+@@ -3510,6 +3522,7 @@
+ expose users to several CPU vulnerabilities.
+ Equivalent to: if nokaslr then kpti=0 [ARM64]
+ gather_data_sampling=off [X86]
++ indirect_target_selection=off [X86]
+ kvm.nx_huge_pages=off [X86]
+ l1tf=off [X86]
+ mds=off [X86]
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -49,6 +49,7 @@ static void __init srbds_select_mitigati
+ static void __init l1d_flush_select_mitigation(void);
+ static void __init srso_select_mitigation(void);
+ static void __init gds_select_mitigation(void);
++static void __init its_select_mitigation(void);
+
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -67,6 +68,14 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
+
+ void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
+
++static void __init set_return_thunk(void *thunk)
++{
++ if (x86_return_thunk != __x86_return_thunk)
++ pr_warn("x86/bugs: return thunk changed\n");
++
++ x86_return_thunk = thunk;
++}
++
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+ static void update_spec_ctrl(u64 val)
+ {
+@@ -175,6 +184,7 @@ void __init cpu_select_mitigations(void)
+ */
+ srso_select_mitigation();
+ gds_select_mitigation();
++ its_select_mitigation();
+ }
+
+ /*
+@@ -1104,7 +1114,7 @@ do_cmd_auto:
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+- x86_return_thunk = retbleed_return_thunk;
++ set_return_thunk(retbleed_return_thunk);
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+@@ -1139,7 +1149,7 @@ do_cmd_auto:
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
+
+- x86_return_thunk = call_depth_return_thunk;
++ set_return_thunk(call_depth_return_thunk);
+ break;
+
+ default:
+@@ -1174,6 +1184,115 @@ do_cmd_auto:
+ }
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "ITS: " fmt
++
++enum its_mitigation_cmd {
++ ITS_CMD_OFF,
++ ITS_CMD_ON,
++};
++
++enum its_mitigation {
++ ITS_MITIGATION_OFF,
++ ITS_MITIGATION_ALIGNED_THUNKS,
++ ITS_MITIGATION_RETPOLINE_STUFF,
++};
++
++static const char * const its_strings[] = {
++ [ITS_MITIGATION_OFF] = "Vulnerable",
++ [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
++ [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
++};
++
++static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
++
++static enum its_mitigation_cmd its_cmd __ro_after_init =
++ IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
++
++static int __init its_parse_cmdline(char *str)
++{
++ if (!str)
++ return -EINVAL;
++
++ if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
++ pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
++ return 0;
++ }
++
++ if (!strcmp(str, "off")) {
++ its_cmd = ITS_CMD_OFF;
++ } else if (!strcmp(str, "on")) {
++ its_cmd = ITS_CMD_ON;
++ } else if (!strcmp(str, "force")) {
++ its_cmd = ITS_CMD_ON;
++ setup_force_cpu_bug(X86_BUG_ITS);
++ } else {
++ pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
++ }
++
++ return 0;
++}
++early_param("indirect_target_selection", its_parse_cmdline);
++
++static void __init its_select_mitigation(void)
++{
++ enum its_mitigation_cmd cmd = its_cmd;
++
++ if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
++ its_mitigation = ITS_MITIGATION_OFF;
++ return;
++ }
++
++ /* Retpoline+CDT mitigates ITS, bail out */
++ if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
++ boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
++ its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
++ goto out;
++ }
++
++ /* Exit early to avoid irrelevant warnings */
++ if (cmd == ITS_CMD_OFF) {
++ its_mitigation = ITS_MITIGATION_OFF;
++ goto out;
++ }
++ if (spectre_v2_enabled == SPECTRE_V2_NONE) {
++ pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
++ its_mitigation = ITS_MITIGATION_OFF;
++ goto out;
++ }
++ if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
++ !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
++ pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
++ its_mitigation = ITS_MITIGATION_OFF;
++ goto out;
++ }
++ if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
++ pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
++ its_mitigation = ITS_MITIGATION_OFF;
++ goto out;
++ }
++ if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
++ pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
++ its_mitigation = ITS_MITIGATION_OFF;
++ goto out;
++ }
++
++ switch (cmd) {
++ case ITS_CMD_OFF:
++ its_mitigation = ITS_MITIGATION_OFF;
++ break;
++ case ITS_CMD_ON:
++ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
++ if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
++ setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
++ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++ set_return_thunk(its_return_thunk);
++ break;
++ }
++out:
++ pr_info("%s\n", its_strings[its_mitigation]);
++}
++
++#undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+
+ static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
+@@ -2624,10 +2743,10 @@ static void __init srso_select_mitigatio
+
+ if (boot_cpu_data.x86 == 0x19) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+- x86_return_thunk = srso_alias_return_thunk;
++ set_return_thunk(srso_alias_return_thunk);
+ } else {
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
+- x86_return_thunk = srso_return_thunk;
++ set_return_thunk(srso_return_thunk);
+ }
+ if (has_microcode)
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+@@ -2802,6 +2921,11 @@ static ssize_t rfds_show_state(char *buf
+ return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
+ }
+
++static ssize_t its_show_state(char *buf)
++{
++ return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
++}
++
+ static char *stibp_state(void)
+ {
+ if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+@@ -2984,6 +3108,9 @@ static ssize_t cpu_show_common(struct de
+ case X86_BUG_RFDS:
+ return rfds_show_state(buf);
+
++ case X86_BUG_ITS:
++ return its_show_state(buf);
++
+ default:
+ break;
+ }
+@@ -3063,6 +3190,11 @@ ssize_t cpu_show_reg_file_data_sampling(
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
+ }
++
++ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
++}
+ #endif
+
+ void __warn_thunk(void)
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -599,6 +599,7 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
+ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
+ CPU_SHOW_VULN_FALLBACK(gds);
+ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
++CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+@@ -614,6 +615,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_s
+ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+ static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
++static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+@@ -630,6 +632,7 @@ static struct attribute *cpu_root_vulner
+ &dev_attr_spec_rstack_overflow.attr,
+ &dev_attr_gather_data_sampling.attr,
+ &dev_attr_reg_file_data_sampling.attr,
++ &dev_attr_indirect_target_selection.attr,
+ NULL
+ };
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -77,6 +77,8 @@ extern ssize_t cpu_show_gds(struct devic
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
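
A toy version of the set_return_thunk() helper introduced above, with the thunk symbols replaced by local stub functions (the names here are stand-ins, not the real kernel symbols). It only shows the intent of the wrapper: centralize updates of x86_return_thunk and warn if more than one mitigation tries to install a return thunk.

  #include <stdio.h>

  typedef void (*ret_thunk_t)(void);

  static void default_return_thunk(void)   { }
  static void srso_return_thunk_stub(void) { } /* stand-in for srso_return_thunk */
  static void its_return_thunk_stub(void)  { } /* stand-in for its_return_thunk */

  static ret_thunk_t x86_return_thunk = default_return_thunk;

  static void set_return_thunk(ret_thunk_t thunk)
  {
          if (x86_return_thunk != default_return_thunk)
                  printf("x86/bugs: return thunk changed\n");

          x86_return_thunk = thunk;
  }

  int main(void)
  {
          set_return_thunk(srso_return_thunk_stub); /* first caller: silent */
          set_return_thunk(its_return_thunk_stub);  /* second caller: warns */
          return 0;
  }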
--- /dev/null
+From 634f4db9eb8b43f47da8ea86c3262e7963f4ae59 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 21 Jun 2024 17:40:41 -0700
+Subject: x86/its: Enumerate Indirect Target Selection (ITS) bug
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 159013a7ca18c271ff64192deb62a689b622d860 upstream.
+
+The ITS bug in some pre-Alderlake Intel CPUs may allow indirect branches in
+the first half of a cache line to get predicted to the target of a branch
+located in the second half of the cache line.
+
+Set X86_BUG_ITS on affected CPUs. Mitigation to follow in later commits.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1
+ arch/x86/include/asm/msr-index.h | 8 +++++
+ arch/x86/kernel/cpu/common.c | 58 +++++++++++++++++++++++++++++--------
+ arch/x86/kvm/x86.c | 4 +-
+ 4 files changed, 58 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -526,4 +526,5 @@
+ #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
+ #define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
+ #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
++#define X86_BUG_ITS X86_BUG(1*32 + 5) /* "its" CPU is affected by Indirect Target Selection */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -209,6 +209,14 @@
+ * VERW clears CPU Register
+ * File.
+ */
++#define ARCH_CAP_ITS_NO BIT_ULL(62) /*
++ * Not susceptible to
++ * Indirect Target Selection.
++ * This bit is not set by
++ * HW, but is synthesized by
++ * VMMs for guests to know
++ * their affected status.
++ */
+
+ #define MSR_IA32_FLUSH_CMD 0x0000010b
+ #define L1D_FLUSH BIT(0) /*
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1228,6 +1228,8 @@ static const __initconst struct x86_cpu_
+ #define GDS BIT(6)
+ /* CPU is affected by Register File Data Sampling */
+ #define RFDS BIT(7)
++/* CPU is affected by Indirect Target Selection */
++#define ITS BIT(8)
+
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
+@@ -1239,22 +1241,25 @@ static const struct x86_cpu_id cpu_vuln_
+ VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X, X86_STEPPING_ANY, MMIO),
+ VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL, X86_STEPPING_ANY, SRBDS),
+- VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
++ VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x0, 0x5), MMIO | RETBLEED | GDS),
++ VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS),
+ VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+- VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+- VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
++ VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L, X86_STEPPINGS(0x0, 0xb), MMIO | RETBLEED | GDS | SRBDS),
++ VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE, X86_STEPPINGS(0x0, 0xc), MMIO | RETBLEED | GDS | SRBDS),
++ VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS),
+ VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
+- VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+- VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
+- VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS),
+- VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+- VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
+- VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+- VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L, X86_STEPPING_ANY, GDS),
+- VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE, X86_STEPPING_ANY, GDS),
++ VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L, X86_STEPPING_ANY, GDS | ITS),
++ VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE, X86_STEPPING_ANY, GDS | ITS),
+ VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
+- VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
++ VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE, X86_STEPPING_ANY, RFDS),
+@@ -1318,6 +1323,32 @@ static bool __init vulnerable_to_rfds(u6
+ return cpu_matches(cpu_vuln_blacklist, RFDS);
+ }
+
++static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
++{
++ /* The "immunity" bit trumps everything else: */
++ if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
++ return false;
++ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
++ return false;
++
++ /* None of the affected CPUs have BHI_CTRL */
++ if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
++ return false;
++
++ /*
++ * If a VMM did not expose ITS_NO, assume that a guest could
++ * be running on a vulnerable hardware or may migrate to such
++ * hardware.
++ */
++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
++ return true;
++
++ if (cpu_matches(cpu_vuln_blacklist, ITS))
++ return true;
++
++ return false;
++}
++
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
+@@ -1450,6 +1481,9 @@ static void __init cpu_set_bug_bits(stru
+ if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
+ setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
+
++ if (vulnerable_to_its(x86_arch_cap_msr))
++ setup_force_cpu_bug(X86_BUG_ITS);
++
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ return;
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1623,7 +1623,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
+ ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
+ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
+ ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
+- ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
++ ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
+
+ static u64 kvm_get_arch_capabilities(void)
+ {
+@@ -1657,6 +1657,8 @@ static u64 kvm_get_arch_capabilities(voi
+ data |= ARCH_CAP_MDS_NO;
+ if (!boot_cpu_has_bug(X86_BUG_RFDS))
+ data |= ARCH_CAP_RFDS_NO;
++ if (!boot_cpu_has_bug(X86_BUG_ITS))
++ data |= ARCH_CAP_ITS_NO;
+
+ if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ /*
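
A compact restatement of the vulnerable_to_its() ordering above as standalone C, with the MSR bits and feature flags reduced to booleans (the example inputs are made up). ITS_NO wins outright, non-Intel and BHI_CTRL-capable parts are excluded, a guest that was not given ITS_NO is assumed vulnerable because it may run on or migrate to affected hardware, and only then is the per-model blacklist consulted.

  #include <stdbool.h>
  #include <stdio.h>

  static bool vulnerable_to_its(bool its_no, bool intel, bool bhi_ctrl,
                                bool hypervisor, bool blacklisted)
  {
          if (its_no)         /* the "immunity" bit trumps everything else */
                  return false;
          if (!intel)
                  return false;
          if (bhi_ctrl)       /* none of the affected CPUs have BHI_CTRL */
                  return false;
          if (hypervisor)     /* VMM did not expose ITS_NO: assume vulnerable */
                  return true;

          return blacklisted; /* bare metal: consult the model blacklist */
  }

  int main(void)
  {
          printf("guest without ITS_NO:    %d\n",
                 vulnerable_to_its(false, true, false, true, false));
          printf("bare metal, blacklisted: %d\n",
                 vulnerable_to_its(false, true, false, false, true));
          return 0;
  }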
--- /dev/null
+From 879bfd9b7214ff433cce77cb242b35b1f09964ea Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Oct 2024 10:05:48 -0700
+Subject: x86/its: Use dynamic thunks for indirect branches
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 872df34d7c51a79523820ea6a14860398c639b87 upstream.
+
+The ITS mitigation moves the unsafe indirect branches to a safe thunk. This
+could degrade the prediction accuracy as the source address of indirect
+branches becomes the same for different execution paths.
+
+To improve the predictions, and hence the performance, assign a separate
+thunk for each indirect callsite. This is also a defense-in-depth measure
+to avoid indirect branches aliasing with each other.
+
+As an example, 5000 dynamic thunks would utilize around 16 bits of the
+address space, thereby gaining entropy. For a BTB that uses
+32 bits for indexing, dynamic thunks could provide better prediction
+accuracy than fixed thunks.
+
+Have ITS thunks be variable sized and use EXECMEM_MODULE_TEXT such that
+they are both more flexible (they can be extended later) and live in 2M
+TLBs, just like kernel code, avoiding undue TLB pressure.
+
+ [ pawan: CONFIG_EXECMEM_ROX is not supported on backport kernel, made
+ adjustments to set memory to RW and ROX ]
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig | 1
+ arch/x86/include/asm/alternative.h | 10 ++
+ arch/x86/kernel/alternative.c | 129 ++++++++++++++++++++++++++++++++++++-
+ arch/x86/kernel/module.c | 6 +
+ include/linux/execmem.h | 3
+ include/linux/module.h | 5 +
+ 6 files changed, 151 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2751,6 +2751,7 @@ config MITIGATION_ITS
+ bool "Enable Indirect Target Selection mitigation"
+ depends on CPU_SUP_INTEL && X86_64
+ depends on MITIGATION_RETPOLINE && MITIGATION_RETHUNK
++ select EXECMEM
+ default y
+ help
+ Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -134,6 +134,16 @@ static __always_inline int x86_call_dept
+ }
+ #endif
+
++#ifdef CONFIG_MITIGATION_ITS
++extern void its_init_mod(struct module *mod);
++extern void its_fini_mod(struct module *mod);
++extern void its_free_mod(struct module *mod);
++#else /* CONFIG_MITIGATION_ITS */
++static inline void its_init_mod(struct module *mod) { }
++static inline void its_fini_mod(struct module *mod) { }
++static inline void its_free_mod(struct module *mod) { }
++#endif
++
+ #if defined(CONFIG_MITIGATION_RETHUNK) && defined(CONFIG_OBJTOOL)
+ extern bool cpu_wants_rethunk(void);
+ extern bool cpu_wants_rethunk_at(void *addr);
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -18,6 +18,7 @@
+ #include <linux/mmu_context.h>
+ #include <linux/bsearch.h>
+ #include <linux/sync_core.h>
++#include <linux/execmem.h>
+ #include <asm/text-patching.h>
+ #include <asm/alternative.h>
+ #include <asm/sections.h>
+@@ -32,6 +33,7 @@
+ #include <asm/asm-prototypes.h>
+ #include <asm/cfi.h>
+ #include <asm/ibt.h>
++#include <asm/set_memory.h>
+
+ int __read_mostly alternatives_patched;
+
+@@ -125,6 +127,123 @@ const unsigned char * const x86_nops[ASM
+ #endif
+ };
+
++#ifdef CONFIG_MITIGATION_ITS
++
++static struct module *its_mod;
++static void *its_page;
++static unsigned int its_offset;
++
++/* Initialize a thunk with the "jmp *reg; int3" instructions. */
++static void *its_init_thunk(void *thunk, int reg)
++{
++ u8 *bytes = thunk;
++ int i = 0;
++
++ if (reg >= 8) {
++ bytes[i++] = 0x41; /* REX.B prefix */
++ reg -= 8;
++ }
++ bytes[i++] = 0xff;
++ bytes[i++] = 0xe0 + reg; /* jmp *reg */
++ bytes[i++] = 0xcc;
++
++ return thunk;
++}
++
++void its_init_mod(struct module *mod)
++{
++ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
++ return;
++
++ mutex_lock(&text_mutex);
++ its_mod = mod;
++ its_page = NULL;
++}
++
++void its_fini_mod(struct module *mod)
++{
++ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
++ return;
++
++ WARN_ON_ONCE(its_mod != mod);
++
++ its_mod = NULL;
++ its_page = NULL;
++ mutex_unlock(&text_mutex);
++
++ for (int i = 0; i < mod->its_num_pages; i++) {
++ void *page = mod->its_page_array[i];
++ set_memory_rox((unsigned long)page, 1);
++ }
++}
++
++void its_free_mod(struct module *mod)
++{
++ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
++ return;
++
++ for (int i = 0; i < mod->its_num_pages; i++) {
++ void *page = mod->its_page_array[i];
++ execmem_free(page);
++ }
++ kfree(mod->its_page_array);
++}
++
++static void *its_alloc(void)
++{
++ void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
++
++ if (!page)
++ return NULL;
++
++ if (its_mod) {
++ void *tmp = krealloc(its_mod->its_page_array,
++ (its_mod->its_num_pages+1) * sizeof(void *),
++ GFP_KERNEL);
++ if (!tmp)
++ return NULL;
++
++ its_mod->its_page_array = tmp;
++ its_mod->its_page_array[its_mod->its_num_pages++] = page;
++ }
++
++ return no_free_ptr(page);
++}
++
++static void *its_allocate_thunk(int reg)
++{
++ int size = 3 + (reg / 8);
++ void *thunk;
++
++ if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) {
++ its_page = its_alloc();
++ if (!its_page) {
++ pr_err("ITS page allocation failed\n");
++ return NULL;
++ }
++ memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE);
++ its_offset = 32;
++ }
++
++ /*
++ * If the indirect branch instruction will be in the lower half
++ * of a cacheline, then update the offset to reach the upper half.
++ */
++ if ((its_offset + size - 1) % 64 < 32)
++ its_offset = ((its_offset - 1) | 0x3F) + 33;
++
++ thunk = its_page + its_offset;
++ its_offset += size;
++
++ set_memory_rw((unsigned long)its_page, 1);
++ thunk = its_init_thunk(thunk, reg);
++ set_memory_rox((unsigned long)its_page, 1);
++
++ return thunk;
++}
++
++#endif
++
+ /*
+ * Nomenclature for variable names to simplify and clarify this code and ease
+ * any potential staring at it:
+@@ -637,9 +756,13 @@ static int emit_call_track_retpoline(voi
+ #ifdef CONFIG_MITIGATION_ITS
+ static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+ {
+- return __emit_trampoline(addr, insn, bytes,
+- __x86_indirect_its_thunk_array[reg],
+- __x86_indirect_its_thunk_array[reg]);
++ u8 *thunk = __x86_indirect_its_thunk_array[reg];
++ u8 *tmp = its_allocate_thunk(reg);
++
++ if (tmp)
++ thunk = tmp;
++
++ return __emit_trampoline(addr, insn, bytes, thunk, thunk);
+ }
+
+ /* Check if an indirect branch is at ITS-unsafe address */
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -251,6 +251,8 @@ int module_finalize(const Elf_Ehdr *hdr,
+ ibt_endbr = s;
+ }
+
++ its_init_mod(me);
++
+ if (retpolines || cfi) {
+ void *rseg = NULL, *cseg = NULL;
+ unsigned int rsize = 0, csize = 0;
+@@ -271,6 +273,9 @@ int module_finalize(const Elf_Ehdr *hdr,
+ void *rseg = (void *)retpolines->sh_addr;
+ apply_retpolines(rseg, rseg + retpolines->sh_size);
+ }
++
++ its_fini_mod(me);
++
+ if (returns) {
+ void *rseg = (void *)returns->sh_addr;
+ apply_returns(rseg, rseg + returns->sh_size);
+@@ -318,4 +323,5 @@ int module_finalize(const Elf_Ehdr *hdr,
+ void module_arch_cleanup(struct module *mod)
+ {
+ alternatives_smp_module_del(mod);
++ its_free_mod(mod);
+ }
+--- a/include/linux/execmem.h
++++ b/include/linux/execmem.h
+@@ -4,6 +4,7 @@
+
+ #include <linux/types.h>
+ #include <linux/moduleloader.h>
++#include <linux/cleanup.h>
+
+ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+@@ -123,6 +124,8 @@ void *execmem_alloc(enum execmem_type ty
+ */
+ void execmem_free(void *ptr);
+
++DEFINE_FREE(execmem, void *, if (_T) execmem_free(_T));
++
+ #if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE)
+ void execmem_init(void);
+ #else
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -582,6 +582,11 @@ struct module {
+ atomic_t refcnt;
+ #endif
+
++#ifdef CONFIG_MITIGATION_ITS
++ int its_num_pages;
++ void **its_page_array;
++#endif
++
+ #ifdef CONFIG_CONSTRUCTORS
+ /* Constructor functions. */
+ ctor_fn_t *ctors;
--- /dev/null
+From 8aebbdd956f69e6cc2a46349d81bf8967d885dc1 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 28 Feb 2025 18:35:58 -0800
+Subject: x86/speculation: Add a conditional CS prefix to CALL_NOSPEC
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 052040e34c08428a5a388b85787e8531970c0c67 upstream.
+
+The retpoline mitigation for Spectre-v2 uses thunks for indirect branches. To
+support this mitigation, compilers add a CS prefix with
+-mindirect-branch-cs-prefix. For an indirect branch in asm, this needs to
+be added manually.
+
+A CS prefix is already being added to indirect branches in asm files, but not
+in inline asm. Add the CS prefix to CALL_NOSPEC for inline asm as well. There
+is no JMP_NOSPEC for inline asm.
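+
+A minimal sketch of the rule that the .irp/.ifc loop in __CS_PREFIX
+(added below) encodes, with a hypothetical helper name: only calls
+through r8-r15, the registers that need a REX prefix to encode, get the
+extra CS (0x2e) prefix byte, matching what -mindirect-branch-cs-prefix
+makes the compiler emit:
+
+  static bool sketch_wants_cs_prefix(int reg)
+  {
+          return reg >= 8;        /* r8 .. r15 */
+  }
+
+So a call through %r11 becomes "cs call __x86_indirect_thunk_r11",
+while calls through rax-rdi are emitted unchanged.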
+
+Reported-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20250228-call-nospec-v3-2-96599fed0f33@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -210,9 +210,8 @@
+ .endm
+
+ /*
+- * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
+- * to the retpoline thunk with a CS prefix when the register requires
+- * a RAX prefix byte to encode. Also see apply_retpolines().
++ * Emits a conditional CS prefix that is compatible with
++ * -mindirect-branch-cs-prefix.
+ */
+ .macro __CS_PREFIX reg:req
+ .irp rs,r8,r9,r10,r11,r12,r13,r14,r15
+@@ -439,11 +438,23 @@ static inline void call_depth_return_thu
+ #ifdef CONFIG_X86_64
+
+ /*
++ * Emits a conditional CS prefix that is compatible with
++ * -mindirect-branch-cs-prefix.
++ */
++#define __CS_PREFIX(reg) \
++ ".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
++ ".ifc \\rs," reg "\n" \
++ ".byte 0x2e\n" \
++ ".endif\n" \
++ ".endr\n"
++
++/*
+ * Inline asm uses the %V modifier which is only in newer GCC
+ * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
+ */
+ #ifdef CONFIG_MITIGATION_RETPOLINE
+-#define CALL_NOSPEC "call __x86_indirect_thunk_%V[thunk_target]\n"
++#define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \
++ "call __x86_indirect_thunk_%V[thunk_target]\n"
+ #else
+ #define CALL_NOSPEC "call *%[thunk_target]\n"
+ #endif
--- /dev/null
+From c5592dd8edcf28fed105f8722a569ea63ec94688 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 20 Mar 2025 11:13:15 -0700
+Subject: x86/speculation: Remove the extra #ifdef around CALL_NOSPEC
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit c8c81458863ab686cda4fe1e603fccaae0f12460 upstream.
+
+Commit:
+
+ 010c4a461c1d ("x86/speculation: Simplify and make CALL_NOSPEC consistent")
+
+added an #ifdef CONFIG_MITIGATION_RETPOLINE around the CALL_NOSPEC definition.
+This is not required as this code is already under a larger #ifdef.
+
+Remove the extra #ifdef, no functional change.
+
+vmlinux size remains the same before and after this change:
+
+ CONFIG_MITIGATION_RETPOLINE=y:
+ text data bss dec hex filename
+ 25434752 7342290 2301212 35078254 217406e vmlinux.before
+ 25434752 7342290 2301212 35078254 217406e vmlinux.after
+
+ # CONFIG_MITIGATION_RETPOLINE is not set:
+ text data bss dec hex filename
+ 22943094 6214994 1550152 30708240 1d49210 vmlinux.before
+ 22943094 6214994 1550152 30708240 1d49210 vmlinux.after
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Link: https://lore.kernel.org/r/20250320-call-nospec-extra-ifdef-v1-1-d9b084d24820@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -452,12 +452,8 @@ static inline void call_depth_return_thu
+ * Inline asm uses the %V modifier which is only in newer GCC
+ * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
+ */
+-#ifdef CONFIG_MITIGATION_RETPOLINE
+ #define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \
+ "call __x86_indirect_thunk_%V[thunk_target]\n"
+-#else
+-#define CALL_NOSPEC "call *%[thunk_target]\n"
+-#endif
+
+ # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
--- /dev/null
+From 145110ea2a53c52a2ceb675f7590fdf451925a4b Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 28 Feb 2025 18:35:43 -0800
+Subject: x86/speculation: Simplify and make CALL_NOSPEC consistent
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit cfceff8526a426948b53445c02bcb98453c7330d upstream.
+
+The CALL_NOSPEC macro is used to generate Spectre-v2 mitigation-friendly
+indirect branches. At compile time the macro defaults to an indirect branch,
+and at runtime those can be patched to thunk-based mitigations.
+
+This approach is the opposite of what is done for the rest of the kernel, where
+the compile time default is to replace indirect calls with retpoline thunk
+calls.
+
+Make CALL_NOSPEC consistent with the rest of the kernel, default to
+retpoline thunk at compile time when CONFIG_MITIGATION_RETPOLINE is
+enabled.
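+
+A minimal usage sketch (hypothetical wrapper; CALL_NOSPEC and
+THUNK_TARGET come from <asm/nospec-branch.h>): with
+CONFIG_MITIGATION_RETPOLINE=y the call site below now assembles to
+"call __x86_indirect_thunk_<reg>" and, like any other call site, can be
+patched back to a plain indirect call when the CPU does not need
+retpolines:
+
+  /* Sketch only: assumes fn follows the normal C calling convention. */
+  static unsigned long sketch_indirect_call(unsigned long (*fn)(void))
+  {
+          unsigned long ret;
+
+          asm volatile(CALL_NOSPEC
+                       : "=a" (ret)
+                       : THUNK_TARGET(fn)
+                       : "rcx", "rdx", "rsi", "rdi",
+                         "r8", "r9", "r10", "r11", "memory", "cc");
+          return ret;
+  }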
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20250228-call-nospec-v3-1-96599fed0f33@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -442,16 +442,11 @@ static inline void call_depth_return_thu
+ * Inline asm uses the %V modifier which is only in newer GCC
+ * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
+ */
+-# define CALL_NOSPEC \
+- ALTERNATIVE_2( \
+- ANNOTATE_RETPOLINE_SAFE \
+- "call *%[thunk_target]\n", \
+- "call __x86_indirect_thunk_%V[thunk_target]\n", \
+- X86_FEATURE_RETPOLINE, \
+- "lfence;\n" \
+- ANNOTATE_RETPOLINE_SAFE \
+- "call *%[thunk_target]\n", \
+- X86_FEATURE_RETPOLINE_LFENCE)
++#ifdef CONFIG_MITIGATION_RETPOLINE
++#define CALL_NOSPEC "call __x86_indirect_thunk_%V[thunk_target]\n"
++#else
++#define CALL_NOSPEC "call *%[thunk_target]\n"
++#endif
+
+ # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+