--- /dev/null
+From 980d7eb01638f535e9ab885449c0bbb4cec0fde6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:25 +0200
+Subject: ARM: cpu: Switch to arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit ee31bb0524a2e7c99b03f50249a411cc1eaa411f upstream
+
+check_bugs() is about to be phased out. Switch over to the new
+arch_cpu_finalize_init() implementation.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230613224545.078124882@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/Kconfig | 1 +
+ arch/arm/include/asm/bugs.h | 4 ----
+ arch/arm/kernel/bugs.c | 3 ++-
+ 3 files changed, 3 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -5,6 +5,7 @@ config ARM
+ select ARCH_32BIT_OFF_T
+ select ARCH_CLOCKSOURCE_DATA
+ select ARCH_HAS_BINFMT_FLAT
++ select ARCH_HAS_CPU_FINALIZE_INIT if MMU
+ select ARCH_HAS_DEBUG_VIRTUAL if MMU
+ select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
+--- a/arch/arm/include/asm/bugs.h
++++ b/arch/arm/include/asm/bugs.h
+@@ -1,7 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * arch/arm/include/asm/bugs.h
+- *
+ * Copyright (C) 1995-2003 Russell King
+ */
+ #ifndef __ASM_BUGS_H
+@@ -10,10 +8,8 @@
+ extern void check_writebuffer_bugs(void);
+
+ #ifdef CONFIG_MMU
+-extern void check_bugs(void);
+ extern void check_other_bugs(void);
+ #else
+-#define check_bugs() do { } while (0)
+ #define check_other_bugs() do { } while (0)
+ #endif
+
+--- a/arch/arm/kernel/bugs.c
++++ b/arch/arm/kernel/bugs.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/init.h>
++#include <linux/cpu.h>
+ #include <asm/bugs.h>
+ #include <asm/proc-fns.h>
+
+@@ -11,7 +12,7 @@ void check_other_bugs(void)
+ #endif
+ }
+
+-void __init check_bugs(void)
++void __init arch_cpu_finalize_init(void)
+ {
+ check_writebuffer_bugs();
+ check_other_bugs();
--- /dev/null
+From 1b0fc0345f2852ffe54fb9ae0e12e2ee69ad6a20 Mon Sep 17 00:00:00 2001
+From: Dave Hansen <dave.hansen@linux.intel.com>
+Date: Tue, 1 Aug 2023 07:31:07 -0700
+Subject: Documentation/x86: Fix backwards on/off logic about YMM support
+
+From: Dave Hansen <dave.hansen@linux.intel.com>
+
+commit 1b0fc0345f2852ffe54fb9ae0e12e2ee69ad6a20 upstream
+
+These options clearly turn *off* XSAVE YMM support. Correct the
+typo.
+
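+For reference, the enumeration the documentation describes means userspace
+must check the CPUID AVX bit *and* that the OS actually exposes YMM state
+via XGETBV. A hedged, illustrative userspace sketch (not part of this patch;
+bit_AVX and bit_OSXSAVE come from GCC/clang's <cpuid.h>):
+
+  #include <cpuid.h>   /* __get_cpuid(), bit_AVX, bit_OSXSAVE */
+  #include <stdint.h>
+
+  static int avx_usable(void)
+  {
+          unsigned int eax, ebx, ecx, edx;
+          uint32_t xcr0_lo, xcr0_hi;
+
+          if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
+                  return 0;
+          /* OSXSAVE must be set before XGETBV may be executed. */
+          if (!(ecx & bit_AVX) || !(ecx & bit_OSXSAVE))
+                  return 0;
+          __asm__ volatile("xgetbv" : "=a"(xcr0_lo), "=d"(xcr0_hi) : "c"(0));
+          /* XCR0 bits 1 and 2: XMM and YMM state must both be enabled. */
+          return (xcr0_lo & 0x6) == 0x6;
+  }
+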
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Fixes: 553a5c03e90a ("x86/speculation: Add force option to GDS mitigation")
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/gather_data_sampling.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
++++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
+@@ -63,7 +63,7 @@ GDS can also be mitigated on systems tha
+ disabling AVX. This can be done by setting gather_data_sampling="force" or
+ "clearcpuid=avx" on the kernel command-line.
+
+-If used, these options will disable AVX use by turning on XSAVE YMM support.
++If used, these options will disable AVX use by turning off XSAVE YMM support.
+ However, the processor will still enumerate AVX support. Userspace that
+ does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM
+ support will break.
--- /dev/null
+From dff0dc6635f86b571b1bb61f3f3525b3763c3566 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:27 +0200
+Subject: ia64/cpu: Switch to arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 6c38e3005621800263f117fb00d6787a76e16de7 upstream
+
+check_bugs() is about to be phased out. Switch over to the new
+arch_cpu_finalize_init() implementation.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230613224545.137045745@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/ia64/Kconfig | 1 +
+ arch/ia64/include/asm/bugs.h | 20 --------------------
+ arch/ia64/kernel/setup.c | 3 +--
+ 3 files changed, 2 insertions(+), 22 deletions(-)
+ delete mode 100644 arch/ia64/include/asm/bugs.h
+
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -8,6 +8,7 @@ menu "Processor type and features"
+
+ config IA64
+ bool
++ select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select ACPI
+--- a/arch/ia64/include/asm/bugs.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- * void check_bugs(void);
+- *
+- * Based on <asm-alpha/bugs.h>.
+- *
+- * Modified 1998, 1999, 2003
+- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+- */
+-#ifndef _ASM_IA64_BUGS_H
+-#define _ASM_IA64_BUGS_H
+-
+-#include <asm/processor.h>
+-
+-extern void check_bugs (void);
+-
+-#endif /* _ASM_IA64_BUGS_H */
+--- a/arch/ia64/kernel/setup.c
++++ b/arch/ia64/kernel/setup.c
+@@ -1073,8 +1073,7 @@ cpu_init (void)
+ }
+ }
+
+-void __init
+-check_bugs (void)
++void __init arch_cpu_finalize_init(void)
+ {
+ ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
+ (unsigned long) __end___mckinley_e9_bundles);
--- /dev/null
+From 4f8644b469a237107a34deb77ca301377c7def7c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:39 +0200
+Subject: init: Invoke arch_cpu_finalize_init() earlier
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 9df9d2f0471b4c4702670380b8d8a45b40b23a7d upstream
+
+X86 is reworking the boot process so that initializations which are not
+required during early boot can be moved into the late boot process and out
+of the fragile and restricted initial boot phase.
+
+arch_cpu_finalize_init() is the obvious place to do such initializations,
+but arch_cpu_finalize_init() is invoked too late in start_kernel() e.g. for
+initializing the FPU completely. fork_init() requires that the FPU is
+initialized as the size of task_struct on X86 depends on the size of the
+required FPU register buffer.
+
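+As a hedged sketch of that dependency (loosely modeled on x86's
+fpu__init_task_struct_size(); treat the details as illustrative):
+
+  void __init fpu__init_task_struct_size(void)
+  {
+          int task_size = sizeof(struct task_struct);
+
+          /* Replace the static FPU state with the dynamically sized
+           * XSAVE buffer; fork_init() sizes its slab from this. */
+          task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
+          task_size += fpu_kernel_xstate_size;
+
+          arch_task_struct_size = task_size;
+  }
+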
+Fortunately none of the init calls between calibrate_delay() and
+arch_cpu_finalize_init() is relevant for the functionality of
+arch_cpu_finalize_init().
+
+Invoke it right after calibrate_delay() where everything which is relevant
+for arch_cpu_finalize_init() has been set up already.
+
+No functional change intended.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Link: https://lore.kernel.org/r/20230613224545.612182854@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/main.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -744,6 +744,9 @@ asmlinkage __visible void __init start_k
+ late_time_init();
+ sched_clock_init();
+ calibrate_delay();
++
++ arch_cpu_finalize_init();
++
+ pid_idr_init();
+ anon_vma_init();
+ #ifdef CONFIG_X86
+@@ -772,8 +775,6 @@ asmlinkage __visible void __init start_k
+
+ poking_init();
+
+- arch_cpu_finalize_init();
+-
+ acpi_subsystem_init();
+ arch_post_acpi_subsys_init();
+ sfi_init_late();
--- /dev/null
+From 11bc27b01a313cc489c807ceddb0a93a6770fd11 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:22 +0200
+Subject: init: Provide arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 7725acaa4f0c04fbefb0e0d342635b967bb7d414 upstream
+
+check_bugs() has become a dumping ground for all sorts of activities to
+finalize the CPU initialization before running the rest of the init code.
+
+Most are empty, a few do actual bug checks, some do alternative patching
+and some cobble a CPU advertisement string together.
+
+Aside from that, the current implementation requires duplicated function
+declarations and mostly empty header files for them.
+
+Provide a new function arch_cpu_finalize_init(). Provide a generic
+declaration if CONFIG_ARCH_HAS_CPU_FINALIZE_INIT is selected and a stub
+inline otherwise.
+
+This requires a temporary #ifdef in start_kernel() which will be removed
+along with check_bugs() once the architectures are converted over.
+
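+As a hedged sketch, the conversion pattern the per-architecture patches in
+this queue follow is: select the new Kconfig symbol and replace check_bugs()
+with the new function, e.g.
+
+  # arch/xxx/Kconfig ("xxx" is a placeholder)
+  select ARCH_HAS_CPU_FINALIZE_INIT
+
+  /* arch/xxx/kernel/setup.c */
+  #include <linux/cpu.h>
+
+  void __init arch_cpu_finalize_init(void)
+  {
+          /* former check_bugs() body goes here */
+  }
+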
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230613224544.957805717@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/Kconfig | 3 +++
+ include/linux/cpu.h | 6 ++++++
+ init/main.c | 5 +++++
+ 3 files changed, 14 insertions(+)
+
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -271,6 +271,9 @@ config ARCH_HAS_UNCACHED_SEGMENT
+ select ARCH_HAS_DMA_PREP_COHERENT
+ bool
+
++config ARCH_HAS_CPU_FINALIZE_INIT
++ bool
++
+ # Select if arch init_task must go in the __init_task_data section
+ config ARCH_TASK_STRUCT_ON_STACK
+ bool
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -193,6 +193,12 @@ void arch_cpu_idle_enter(void);
+ void arch_cpu_idle_exit(void);
+ void arch_cpu_idle_dead(void);
+
++#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
++void arch_cpu_finalize_init(void);
++#else
++static inline void arch_cpu_finalize_init(void) { }
++#endif
++
+ int cpu_report_state(int cpu);
+ int cpu_check_up_prepare(int cpu);
+ void cpu_set_state_online(int cpu);
+--- a/init/main.c
++++ b/init/main.c
+@@ -772,7 +772,12 @@ asmlinkage __visible void __init start_k
+ delayacct_init();
+
+ poking_init();
++
++ arch_cpu_finalize_init();
++ /* Temporary conditional until everything has been converted */
++#ifndef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
+ check_bugs();
++#endif
+
+ acpi_subsystem_init();
+ arch_post_acpi_subsys_init();
--- /dev/null
+From eb6d42cbbc5b7384bebb9b783970c5b07ac10fc5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:38 +0200
+Subject: init: Remove check_bugs() leftovers
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 61235b24b9cb37c13fcad5b9596d59a1afdcec30 upstream
+
+Everything is converted over to arch_cpu_finalize_init(). Remove the
+check_bugs() leftovers including the empty stubs in asm-generic, alpha,
+parisc, powerpc and xtensa.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Link: https://lore.kernel.org/r/20230613224545.553215951@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/alpha/include/asm/bugs.h | 20 --------------------
+ arch/parisc/include/asm/bugs.h | 20 --------------------
+ arch/powerpc/include/asm/bugs.h | 15 ---------------
+ arch/xtensa/include/asm/bugs.h | 18 ------------------
+ include/asm-generic/bugs.h | 11 -----------
+ init/main.c | 5 -----
+ 6 files changed, 89 deletions(-)
+ delete mode 100644 arch/alpha/include/asm/bugs.h
+ delete mode 100644 arch/parisc/include/asm/bugs.h
+ delete mode 100644 arch/powerpc/include/asm/bugs.h
+ delete mode 100644 arch/xtensa/include/asm/bugs.h
+ delete mode 100644 include/asm-generic/bugs.h
+
+--- a/arch/alpha/include/asm/bugs.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- * include/asm-alpha/bugs.h
+- *
+- * Copyright (C) 1994 Linus Torvalds
+- */
+-
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- * void check_bugs(void);
+- */
+-
+-/*
+- * I don't know of any alpha bugs yet.. Nice chip
+- */
+-
+-static void check_bugs(void)
+-{
+-}
+--- a/arch/parisc/include/asm/bugs.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * include/asm-parisc/bugs.h
+- *
+- * Copyright (C) 1999 Mike Shaver
+- */
+-
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- * void check_bugs(void);
+- */
+-
+-#include <asm/processor.h>
+-
+-static inline void check_bugs(void)
+-{
+-// identify_cpu(&boot_cpu_data);
+-}
+--- a/arch/powerpc/include/asm/bugs.h
++++ /dev/null
+@@ -1,15 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-#ifndef _ASM_POWERPC_BUGS_H
+-#define _ASM_POWERPC_BUGS_H
+-
+-/*
+- */
+-
+-/*
+- * This file is included by 'init/main.c' to check for
+- * architecture-dependent bugs.
+- */
+-
+-static inline void check_bugs(void) { }
+-
+-#endif /* _ASM_POWERPC_BUGS_H */
+--- a/arch/xtensa/include/asm/bugs.h
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/*
+- * include/asm-xtensa/bugs.h
+- *
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Xtensa processors don't have any bugs. :)
+- *
+- * This file is subject to the terms and conditions of the GNU General
+- * Public License. See the file "COPYING" in the main directory of
+- * this archive for more details.
+- */
+-
+-#ifndef _XTENSA_BUGS_H
+-#define _XTENSA_BUGS_H
+-
+-static void check_bugs(void) { }
+-
+-#endif /* _XTENSA_BUGS_H */
+--- a/include/asm-generic/bugs.h
++++ /dev/null
+@@ -1,11 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __ASM_GENERIC_BUGS_H
+-#define __ASM_GENERIC_BUGS_H
+-/*
+- * This file is included by 'init/main.c' to check for
+- * architecture-dependent bugs.
+- */
+-
+-static inline void check_bugs(void) { }
+-
+-#endif /* __ASM_GENERIC_BUGS_H */
+--- a/init/main.c
++++ b/init/main.c
+@@ -96,7 +96,6 @@
+ #include <linux/mem_encrypt.h>
+
+ #include <asm/io.h>
+-#include <asm/bugs.h>
+ #include <asm/setup.h>
+ #include <asm/sections.h>
+ #include <asm/cacheflush.h>
+@@ -774,10 +773,6 @@ asmlinkage __visible void __init start_k
+ poking_init();
+
+ arch_cpu_finalize_init();
+- /* Temporary conditional until everything has been converted */
+-#ifndef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
+- check_bugs();
+-#endif
+
+ acpi_subsystem_init();
+ arch_post_acpi_subsys_init();
--- /dev/null
+From 555b9962472818fba44eb42f31cfd1e118d20478 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:41 +0200
+Subject: init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 439e17576eb47f26b78c5bbc72e344d4206d2327 upstream
+
+Invoke the X86ism mem_encrypt_init() from X86 arch_cpu_finalize_init() and
+remove the weak fallback from the core code.
+
+No functional change.
+
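+In a minimal sketch of the pattern (taken from the diff below), the weak
+kernel-wide fallback becomes a header stub that vanishes when
+CONFIG_AMD_MEM_ENCRYPT is off:
+
+  /* Before: init/main.c carried a weak fallback any arch could override. */
+  void __init __weak mem_encrypt_init(void) { }
+
+  /* After: x86's <asm/mem_encrypt.h> provides the !AMD_MEM_ENCRYPT stub and
+   * the call moves into x86's arch_cpu_finalize_init(). */
+  static inline void mem_encrypt_init(void) { }
+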
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230613224545.670360645@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/mem_encrypt.h | 2 ++
+ arch/x86/kernel/cpu/common.c | 11 +++++++++++
+ init/main.c | 11 -----------
+ 3 files changed, 13 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -77,6 +77,8 @@ early_set_memory_decrypted(unsigned long
+ static inline int __init
+ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+
++static inline void mem_encrypt_init(void) { }
++
+ #define __bss_decrypted
+
+ #endif /* CONFIG_AMD_MEM_ENCRYPT */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -17,6 +17,7 @@
+ #include <linux/init.h>
+ #include <linux/kprobes.h>
+ #include <linux/kgdb.h>
++#include <linux/mem_encrypt.h>
+ #include <linux/smp.h>
+ #include <linux/cpu.h>
+ #include <linux/io.h>
+@@ -2209,4 +2210,14 @@ void __init arch_cpu_finalize_init(void)
+ } else {
+ fpu__init_check_bugs();
+ }
++
++ /*
++ * This needs to be called before any devices perform DMA
++ * operations that might use the SWIOTLB bounce buffers. It will
++ * mark the bounce buffers as decrypted so that their usage will
++ * not cause "plain-text" data to be decrypted when accessed. It
++ * must be called after late_time_init() so that Hyper-V x86/x64
++ * hypercalls work when the SWIOTLB bounce buffers are decrypted.
++ */
++ mem_encrypt_init();
+ }
+--- a/init/main.c
++++ b/init/main.c
+@@ -93,7 +93,6 @@
+ #include <linux/cache.h>
+ #include <linux/rodata_test.h>
+ #include <linux/jump_label.h>
+-#include <linux/mem_encrypt.h>
+
+ #include <asm/io.h>
+ #include <asm/setup.h>
+@@ -503,8 +502,6 @@ void __init __weak thread_stack_cache_in
+ }
+ #endif
+
+-void __init __weak mem_encrypt_init(void) { }
+-
+ void __init __weak poking_init(void) { }
+
+ void __init __weak pgtable_cache_init(void) { }
+@@ -720,14 +717,6 @@ asmlinkage __visible void __init start_k
+ */
+ locking_selftest();
+
+- /*
+- * This needs to be called before any devices perform DMA
+- * operations that might use the SWIOTLB bounce buffers. It will
+- * mark the bounce buffers as decrypted so that their usage will
+- * not cause "plain-text" data to be decrypted when accessed.
+- */
+- mem_encrypt_init();
+-
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start && !initrd_below_start_ok &&
+ page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
--- /dev/null
+From e9a103c76a5ffb605204f25222e6217931ff129b Mon Sep 17 00:00:00 2001
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Date: Wed, 12 Jul 2023 19:43:14 -0700
+Subject: KVM: Add GDS_NO support to KVM
+
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+
+commit 81ac7e5d741742d650b4ed6186c4826c1a0631a7 upstream
+
+Gather Data Sampling (GDS) is a transient execution attack using
+gather instructions from the AVX2 and AVX512 extensions. This attack
+allows malicious code to infer data that was previously stored in
+vector registers. Systems that are not vulnerable to GDS will set the
+GDS_NO bit of the IA32_ARCH_CAPABILITIES MSR. This is useful for VM
+guests that may think they are on vulnerable systems that are, in
+fact, not affected. Guests that are running on affected hosts where
+the mitigation is enabled are protected as if they were running
+on an unaffected system.
+
+On all hosts that are not affected or that are mitigated, set the
+GDS_NO bit.
+
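+A hedged sketch of how a guest kernel consumes the bit (using the kernel's
+existing MSR helpers; illustrative only, not the exact bugs.c code):
+
+  u64 ia32_cap = 0;
+
+  if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+          rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+  if (ia32_cap & ARCH_CAP_GDS_NO)
+          return; /* host says not affected: skip the GDS mitigation */
+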
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 7 +++++++
+ arch/x86/kvm/x86.c | 5 +++++
+ 2 files changed, 12 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -628,6 +628,13 @@ static const char * const gds_strings[]
+ [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
+ };
+
++bool gds_ucode_mitigated(void)
++{
++ return (gds_mitigation == GDS_MITIGATION_FULL ||
++ gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
++}
++EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
++
+ void update_gds_msr(void)
+ {
+ u64 mcu_ctrl_after;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -226,6 +226,8 @@ struct kvm_stats_debugfs_item debugfs_en
+
+ u64 __read_mostly host_xcr0;
+
++extern bool gds_ucode_mitigated(void);
++
+ struct kmem_cache *x86_fpu_cache;
+ EXPORT_SYMBOL_GPL(x86_fpu_cache);
+
+@@ -1409,6 +1411,9 @@ static u64 kvm_get_arch_capabilities(voi
+ /* Guests don't need to know "Fill buffer clear control" exists */
+ data &= ~ARCH_CAP_FB_CLEAR_CTRL;
+
++ if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
++ data |= ARCH_CAP_GDS_NO;
++
+ return data;
+ }
+
--- /dev/null
+From ca442015ccef31abd0a73cd621c4a4da3a76d20b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:30 +0200
+Subject: m68k/cpu: Switch to arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 9ceecc2589b9d7cef6b321339ed8de484eac4b20 upstream
+
+check_bugs() is about to be phased out. Switch over to the new
+arch_cpu_finalize_init() implementation.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Link: https://lore.kernel.org/r/20230613224545.254342916@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/m68k/Kconfig | 1 +
+ arch/m68k/include/asm/bugs.h | 21 ---------------------
+ arch/m68k/kernel/setup_mm.c | 3 ++-
+ 3 files changed, 3 insertions(+), 22 deletions(-)
+ delete mode 100644 arch/m68k/include/asm/bugs.h
+
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -4,6 +4,7 @@ config M68K
+ default y
+ select ARCH_32BIT_OFF_T
+ select ARCH_HAS_BINFMT_FLAT
++ select ARCH_HAS_CPU_FINALIZE_INIT if MMU
+ select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
+ select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
+--- a/arch/m68k/include/asm/bugs.h
++++ /dev/null
+@@ -1,21 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * include/asm-m68k/bugs.h
+- *
+- * Copyright (C) 1994 Linus Torvalds
+- */
+-
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- * void check_bugs(void);
+- */
+-
+-#ifdef CONFIG_MMU
+-extern void check_bugs(void); /* in arch/m68k/kernel/setup.c */
+-#else
+-static void check_bugs(void)
+-{
+-}
+-#endif
+--- a/arch/m68k/kernel/setup_mm.c
++++ b/arch/m68k/kernel/setup_mm.c
+@@ -10,6 +10,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/cpu.h>
+ #include <linux/mm.h>
+ #include <linux/sched.h>
+ #include <linux/delay.h>
+@@ -527,7 +528,7 @@ static int __init proc_hardware_init(voi
+ module_init(proc_hardware_init);
+ #endif
+
+-void check_bugs(void)
++void __init arch_cpu_finalize_init(void)
+ {
+ #if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU)
+ if (m68k_fputype == 0) {
--- /dev/null
+From 2bedb079d39e87a51a6af0a9606dbd147a9bbfde Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:32 +0200
+Subject: mips/cpu: Switch to arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 7f066a22fe353a827a402ee2835e81f045b1574d upstream
+
+check_bugs() is about to be phased out. Switch over to the new
+arch_cpu_finalize_init() implementation.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230613224545.312438573@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/Kconfig | 1 +
+ arch/mips/include/asm/bugs.h | 17 -----------------
+ arch/mips/kernel/setup.c | 13 +++++++++++++
+ 3 files changed, 14 insertions(+), 17 deletions(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -5,6 +5,7 @@ config MIPS
+ select ARCH_32BIT_OFF_T if !64BIT
+ select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
+ select ARCH_CLOCKSOURCE_DATA
++ select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_SUPPORTS_UPROBES
+--- a/arch/mips/include/asm/bugs.h
++++ b/arch/mips/include/asm/bugs.h
+@@ -1,17 +1,11 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+ * Copyright (C) 2007 Maciej W. Rozycki
+- *
+- * Needs:
+- * void check_bugs(void);
+ */
+ #ifndef _ASM_BUGS_H
+ #define _ASM_BUGS_H
+
+ #include <linux/bug.h>
+-#include <linux/delay.h>
+ #include <linux/smp.h>
+
+ #include <asm/cpu.h>
+@@ -31,17 +25,6 @@ static inline void check_bugs_early(void
+ #endif
+ }
+
+-static inline void check_bugs(void)
+-{
+- unsigned int cpu = smp_processor_id();
+-
+- cpu_data[cpu].udelay_val = loops_per_jiffy;
+- check_bugs32();
+-#ifdef CONFIG_64BIT
+- check_bugs64();
+-#endif
+-}
+-
+ static inline int r4k_daddiu_bug(void)
+ {
+ #ifdef CONFIG_64BIT
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -11,6 +11,8 @@
+ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
+ */
+ #include <linux/init.h>
++#include <linux/cpu.h>
++#include <linux/delay.h>
+ #include <linux/ioport.h>
+ #include <linux/export.h>
+ #include <linux/screen_info.h>
+@@ -812,3 +814,14 @@ static int __init setnocoherentio(char *
+ }
+ early_param("nocoherentio", setnocoherentio);
+ #endif
++
++void __init arch_cpu_finalize_init(void)
++{
++ unsigned int cpu = smp_processor_id();
++
++ cpu_data[cpu].udelay_val = loops_per_jiffy;
++ check_bugs32();
++
++ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
++ check_bugs64();
++}
--- /dev/null
+From af80602799681c78f14fbe20b6185a56020dedee Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 25 Oct 2022 21:38:18 +0200
+Subject: mm: Move mm_cachep initialization to mm_init()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit af80602799681c78f14fbe20b6185a56020dedee upstream.
+
+In order to allow using mm_alloc() much earlier, move initializing
+mm_cachep into mm_init().
+
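+A hedged sketch of the kind of early caller this enables (the x86 text
+poking rework elsewhere in this series is the real consumer; this fragment
+is illustrative):
+
+  void __init poking_init(void)
+  {
+          struct mm_struct *poking_mm;
+
+          /* Safe now: mm_cachep is created in mm_init(), which runs
+           * before poking_init() in start_kernel(). */
+          poking_mm = mm_alloc();
+          BUG_ON(!poking_mm);
+  }
+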
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20221025201057.751153381@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/sched/task.h | 1 +
+ init/main.c | 1 +
+ kernel/fork.c | 32 ++++++++++++++++++--------------
+ 3 files changed, 20 insertions(+), 14 deletions(-)
+
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -53,6 +53,7 @@ extern void sched_dead(struct task_struc
+ void __noreturn do_task_dead(void);
+ void __noreturn make_task_dead(int signr);
+
++extern void mm_cache_init(void);
+ extern void proc_caches_init(void);
+
+ extern void fork_init(void);
+--- a/init/main.c
++++ b/init/main.c
+@@ -563,6 +563,7 @@ static void __init mm_init(void)
+ init_espfix_bsp();
+ /* Should be run after espfix64 is set up. */
+ pti_init();
++ mm_cache_init();
+ }
+
+ void __init __weak arch_call_rest_init(void)
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2705,10 +2705,27 @@ static void sighand_ctor(void *data)
+ init_waitqueue_head(&sighand->signalfd_wqh);
+ }
+
+-void __init proc_caches_init(void)
++void __init mm_cache_init(void)
+ {
+ unsigned int mm_size;
+
++ /*
++ * The mm_cpumask is located at the end of mm_struct, and is
++ * dynamically sized based on the maximum CPU number this system
++ * can have, taking hotplug into account (nr_cpu_ids).
++ */
++ mm_size = sizeof(struct mm_struct) + cpumask_size();
++
++ mm_cachep = kmem_cache_create_usercopy("mm_struct",
++ mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
++ offsetof(struct mm_struct, saved_auxv),
++ sizeof_field(struct mm_struct, saved_auxv),
++ NULL);
++}
++
++void __init proc_caches_init(void)
++{
+ sighand_cachep = kmem_cache_create("sighand_cache",
+ sizeof(struct sighand_struct), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
+@@ -2726,19 +2743,6 @@ void __init proc_caches_init(void)
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+ NULL);
+
+- /*
+- * The mm_cpumask is located at the end of mm_struct, and is
+- * dynamically sized based on the maximum CPU number this system
+- * can have, taking hotplug into account (nr_cpu_ids).
+- */
+- mm_size = sizeof(struct mm_struct) + cpumask_size();
+-
+- mm_cachep = kmem_cache_create_usercopy("mm_struct",
+- mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+- offsetof(struct mm_struct, saved_auxv),
+- sizeof_field(struct mm_struct, saved_auxv),
+- NULL);
+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
+ mmap_init();
+ nsproxy_cache_init();
--- /dev/null
+init-provide-arch_cpu_finalize_init.patch
+x86-cpu-switch-to-arch_cpu_finalize_init.patch
+arm-cpu-switch-to-arch_cpu_finalize_init.patch
+ia64-cpu-switch-to-arch_cpu_finalize_init.patch
+m68k-cpu-switch-to-arch_cpu_finalize_init.patch
+mips-cpu-switch-to-arch_cpu_finalize_init.patch
+sh-cpu-switch-to-arch_cpu_finalize_init.patch
+sparc-cpu-switch-to-arch_cpu_finalize_init.patch
+um-cpu-switch-to-arch_cpu_finalize_init.patch
+init-remove-check_bugs-leftovers.patch
+init-invoke-arch_cpu_finalize_init-earlier.patch
+init-x86-move-mem_encrypt_init-into-arch_cpu_finalize_init.patch
+x86-fpu-remove-cpuinfo-argument-from-init-functions.patch
+x86-fpu-mark-init-functions-__init.patch
+x86-fpu-move-fpu-initialization-into-arch_cpu_finalize_init.patch
+x86-speculation-add-gather-data-sampling-mitigation.patch
+x86-speculation-add-force-option-to-gds-mitigation.patch
+x86-speculation-add-kconfig-option-for-gds.patch
+kvm-add-gds_no-support-to-kvm.patch
+x86-xen-fix-secondary-processors-fpu-initialization.patch
+x86-mm-fix-poking_init-for-xen-pv-guests.patch
+x86-mm-use-mm_alloc-in-poking_init.patch
+mm-move-mm_cachep-initialization-to-mm_init.patch
+x86-mm-initialize-text-poking-earlier.patch
+documentation-x86-fix-backwards-on-off-logic-about-ymm-support.patch
+x86-cpufeatures-add-sev-es-cpu-feature.patch
+x86-cpu-add-vm-page-flush-msr-availablility-as-a-cpuid-feature.patch
+x86-cpufeatures-assign-dedicated-feature-word-for-cpuid_0x8000001f.patch
+tools-headers-cpufeatures-sync-with-the-kernel-sources.patch
+x86-bugs-increase-the-x86-bugs-vector-size-to-two-u32s.patch
+x86-cpu-kvm-add-support-for-cpuid_80000021_eax.patch
+xen-netback-fix-buffer-overrun-triggered-by-unusual-packet.patch
+x86-fix-backwards-merge-of-gds-srso-bit.patch
--- /dev/null
+From 3ea1c65b457df5417ae78185f0381816b6d0c22c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:33 +0200
+Subject: sh/cpu: Switch to arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 01eb454e9bfe593f320ecbc9aaec60bf87cd453d upstream
+
+check_bugs() is about to be phased out. Switch over to the new
+arch_cpu_finalize_init() implementation.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230613224545.371697797@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sh/Kconfig | 1
+ arch/sh/include/asm/bugs.h | 78 ----------------------------------------
+ arch/sh/include/asm/processor.h | 2 +
+ arch/sh/kernel/idle.c | 1
+ arch/sh/kernel/setup.c | 55 ++++++++++++++++++++++++++++
+ 5 files changed, 59 insertions(+), 78 deletions(-)
+ delete mode 100644 arch/sh/include/asm/bugs.h
+
+--- a/arch/sh/Kconfig
++++ b/arch/sh/Kconfig
+@@ -2,6 +2,7 @@
+ config SUPERH
+ def_bool y
+ select ARCH_HAS_BINFMT_FLAT if !MMU
++ select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+--- a/arch/sh/include/asm/bugs.h
++++ /dev/null
+@@ -1,78 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __ASM_SH_BUGS_H
+-#define __ASM_SH_BUGS_H
+-
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- * void check_bugs(void);
+- */
+-
+-/*
+- * I don't know of any Super-H bugs yet.
+- */
+-
+-#include <asm/processor.h>
+-
+-extern void select_idle_routine(void);
+-
+-static void __init check_bugs(void)
+-{
+- extern unsigned long loops_per_jiffy;
+- char *p = &init_utsname()->machine[2]; /* "sh" */
+-
+- select_idle_routine();
+-
+- current_cpu_data.loops_per_jiffy = loops_per_jiffy;
+-
+- switch (current_cpu_data.family) {
+- case CPU_FAMILY_SH2:
+- *p++ = '2';
+- break;
+- case CPU_FAMILY_SH2A:
+- *p++ = '2';
+- *p++ = 'a';
+- break;
+- case CPU_FAMILY_SH3:
+- *p++ = '3';
+- break;
+- case CPU_FAMILY_SH4:
+- *p++ = '4';
+- break;
+- case CPU_FAMILY_SH4A:
+- *p++ = '4';
+- *p++ = 'a';
+- break;
+- case CPU_FAMILY_SH4AL_DSP:
+- *p++ = '4';
+- *p++ = 'a';
+- *p++ = 'l';
+- *p++ = '-';
+- *p++ = 'd';
+- *p++ = 's';
+- *p++ = 'p';
+- break;
+- case CPU_FAMILY_SH5:
+- *p++ = '6';
+- *p++ = '4';
+- break;
+- case CPU_FAMILY_UNKNOWN:
+- /*
+- * Specifically use CPU_FAMILY_UNKNOWN rather than
+- * default:, so we're able to have the compiler whine
+- * about unhandled enumerations.
+- */
+- break;
+- }
+-
+-	printk("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
+-
+-#ifndef __LITTLE_ENDIAN__
+- /* 'eb' means 'Endian Big' */
+- *p++ = 'e';
+- *p++ = 'b';
+-#endif
+- *p = '\0';
+-}
+-#endif /* __ASM_SH_BUGS_H */
+--- a/arch/sh/include/asm/processor.h
++++ b/arch/sh/include/asm/processor.h
+@@ -173,6 +173,8 @@ extern unsigned int instruction_size(uns
+ #define instruction_size(insn) (4)
+ #endif
+
++void select_idle_routine(void);
++
+ #endif /* __ASSEMBLY__ */
+
+ #ifdef CONFIG_SUPERH32
+--- a/arch/sh/kernel/idle.c
++++ b/arch/sh/kernel/idle.c
+@@ -15,6 +15,7 @@
+ #include <linux/smp.h>
+ #include <linux/atomic.h>
+ #include <asm/pgalloc.h>
++#include <asm/processor.h>
+ #include <asm/smp.h>
+ #include <asm/bl_bit.h>
+
+--- a/arch/sh/kernel/setup.c
++++ b/arch/sh/kernel/setup.c
+@@ -43,6 +43,7 @@
+ #include <asm/smp.h>
+ #include <asm/mmu_context.h>
+ #include <asm/mmzone.h>
++#include <asm/processor.h>
+ #include <asm/sparsemem.h>
+
+ /*
+@@ -362,3 +363,57 @@ int test_mode_pin(int pin)
+ {
+ return sh_mv.mv_mode_pins() & pin;
+ }
++
++void __init arch_cpu_finalize_init(void)
++{
++ char *p = &init_utsname()->machine[2]; /* "sh" */
++
++ select_idle_routine();
++
++ current_cpu_data.loops_per_jiffy = loops_per_jiffy;
++
++ switch (current_cpu_data.family) {
++ case CPU_FAMILY_SH2:
++ *p++ = '2';
++ break;
++ case CPU_FAMILY_SH2A:
++ *p++ = '2';
++ *p++ = 'a';
++ break;
++ case CPU_FAMILY_SH3:
++ *p++ = '3';
++ break;
++ case CPU_FAMILY_SH4:
++ *p++ = '4';
++ break;
++ case CPU_FAMILY_SH4A:
++ *p++ = '4';
++ *p++ = 'a';
++ break;
++ case CPU_FAMILY_SH4AL_DSP:
++ *p++ = '4';
++ *p++ = 'a';
++ *p++ = 'l';
++ *p++ = '-';
++ *p++ = 'd';
++ *p++ = 's';
++ *p++ = 'p';
++ break;
++ case CPU_FAMILY_UNKNOWN:
++ /*
++ * Specifically use CPU_FAMILY_UNKNOWN rather than
++ * default:, so we're able to have the compiler whine
++ * about unhandled enumerations.
++ */
++ break;
++ }
++
++	pr_info("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
++
++#ifndef __LITTLE_ENDIAN__
++ /* 'eb' means 'Endian Big' */
++ *p++ = 'e';
++ *p++ = 'b';
++#endif
++ *p = '\0';
++}
--- /dev/null
+From e7a2caf480097e1131b5239e95083c3e8995be07 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:35 +0200
+Subject: sparc/cpu: Switch to arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 44ade508e3bfac45ae97864587de29eb1a881ec0 upstream
+
+check_bugs() is about to be phased out. Switch over to the new
+arch_cpu_finalize_init() implementation.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://lore.kernel.org/r/20230613224545.431995857@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/Kconfig | 1 +
+ arch/sparc/include/asm/bugs.h | 18 ------------------
+ arch/sparc/kernel/setup_32.c | 7 +++++++
+ 3 files changed, 8 insertions(+), 18 deletions(-)
+ delete mode 100644 arch/sparc/include/asm/bugs.h
+
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -52,6 +52,7 @@ config SPARC
+ config SPARC32
+ def_bool !64BIT
+ select ARCH_32BIT_OFF_T
++ select ARCH_HAS_CPU_FINALIZE_INIT if !SMP
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select GENERIC_ATOMIC64
+ select CLZ_TAB
+--- a/arch/sparc/include/asm/bugs.h
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/* include/asm/bugs.h: Sparc probes for various bugs.
+- *
+- * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
+- */
+-
+-#ifdef CONFIG_SPARC32
+-#include <asm/cpudata.h>
+-#endif
+-
+-extern unsigned long loops_per_jiffy;
+-
+-static void __init check_bugs(void)
+-{
+-#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
+- cpu_data(0).udelay_val = loops_per_jiffy;
+-#endif
+-}
+--- a/arch/sparc/kernel/setup_32.c
++++ b/arch/sparc/kernel/setup_32.c
+@@ -422,3 +422,10 @@ static int __init topology_init(void)
+ }
+
+ subsys_initcall(topology_init);
++
++#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
++void __init arch_cpu_finalize_init(void)
++{
++ cpu_data(0).udelay_val = loops_per_jiffy;
++}
++#endif
--- /dev/null
+From 1a9bcadd0058a3e81c1beca48e5e08dee9446a01 Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Tue, 2 Mar 2021 17:16:17 -0300
+Subject: tools headers cpufeatures: Sync with the kernel sources
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+commit 1a9bcadd0058a3e81c1beca48e5e08dee9446a01 upstream.
+
+To pick the changes from:
+
+ 3b9c723ed7cfa4e1 ("KVM: SVM: Add support for SVM instruction address check change")
+ b85a0425d8056f3b ("Enumerate AVX Vector Neural Network instructions")
+ fb35d30fe5b06cc2 ("x86/cpufeatures: Assign dedicated feature word for CPUID_0x8000001F[EAX]")
+
+This only causes these perf files to be rebuilt:
+
+ CC /tmp/build/perf/bench/mem-memcpy-x86-64-asm.o
+ CC /tmp/build/perf/bench/mem-memset-x86-64-asm.o
+
+And addresses this perf build warning:
+
+ Warning: Kernel ABI header at 'tools/arch/x86/include/asm/cpufeatures.h' differs from latest version at 'arch/x86/include/asm/cpufeatures.h'
+ diff -u tools/arch/x86/include/asm/cpufeatures.h arch/x86/include/asm/cpufeatures.h
+
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Kyung Min Park <kyung.min.park@intel.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Sean Christopherson <seanjc@google.com>
+Cc: Wei Huang <wei.huang2@amd.com>
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/arch/x86/include/asm/cpufeatures.h | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -13,7 +13,7 @@
+ /*
+ * Defines x86 CPU feature bits
+ */
+-#define NCAPINTS 19 /* N 32-bit words worth of info */
++#define NCAPINTS 20 /* N 32-bit words worth of info */
+ #define NBUGINTS 1 /* N 32-bit bug flags */
+
+ /*
+@@ -96,6 +96,7 @@
+ #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
+ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
+ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
++/* FREE! ( 3*32+17) */
+ #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
+ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
+ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+@@ -199,7 +200,7 @@
+ #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
+ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
++/* FREE! ( 7*32+10) */
+ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
+ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCEs for Spectre variant 2 */
+@@ -209,7 +210,7 @@
+ #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+-#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
++/* FREE! ( 7*32+20) */
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+@@ -287,6 +288,7 @@
+ #define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM-Exit when EIBRS is enabled */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
++#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
+ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+
+ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
+@@ -328,6 +330,7 @@
+ #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
+ #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
+ #define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
++#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
+ #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
+@@ -367,6 +370,13 @@
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+ #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
++/* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
++#define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */
++#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
++#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
++#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
++#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
++
+ /*
+ * BUG word(s)
+ */
--- /dev/null
+From 760b926637a95305fe8b066e8943ef688607dc0e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:36 +0200
+Subject: um/cpu: Switch to arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 9349b5cd0908f8afe95529fc7a8cbb1417df9b0c upstream
+
+check_bugs() is about to be phased out. Switch over to the new
+arch_cpu_finalize_init() implementation.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Richard Weinberger <richard@nod.at>
+Link: https://lore.kernel.org/r/20230613224545.493148694@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/um/Kconfig | 1 +
+ arch/um/include/asm/bugs.h | 7 -------
+ arch/um/kernel/um_arch.c | 3 ++-
+ 3 files changed, 3 insertions(+), 8 deletions(-)
+ delete mode 100644 arch/um/include/asm/bugs.h
+
+--- a/arch/um/Kconfig
++++ b/arch/um/Kconfig
+@@ -5,6 +5,7 @@ menu "UML-specific options"
+ config UML
+ bool
+ default y
++ select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_KCOV
+ select ARCH_NO_PREEMPT
+ select HAVE_ARCH_AUDITSYSCALL
+--- a/arch/um/include/asm/bugs.h
++++ /dev/null
+@@ -1,7 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __UM_BUGS_H
+-#define __UM_BUGS_H
+-
+-void check_bugs(void);
+-
+-#endif
+--- a/arch/um/kernel/um_arch.c
++++ b/arch/um/kernel/um_arch.c
+@@ -3,6 +3,7 @@
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
++#include <linux/cpu.h>
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/mm.h>
+@@ -353,7 +354,7 @@ void __init setup_arch(char **cmdline_p)
+ setup_hostinfo(host_info, sizeof host_info);
+ }
+
+-void __init check_bugs(void)
++void __init arch_cpu_finalize_init(void)
+ {
+ arch_check_bugs();
+ os_check_bugs();
--- /dev/null
+From 9f1e4b82d3cdbbedf38548c403c38352c9490fc4 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Sat, 8 Jul 2023 10:21:35 +0200
+Subject: x86/bugs: Increase the x86 bugs vector size to two u32s
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Upstream commit: 0e52740ffd10c6c316837c6c128f460f1aaba1ea
+
+There was never a doubt in my mind that they would not fit into a single
+u32 eventually.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 +-
+ tools/arch/x86/include/asm/cpufeatures.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -14,7 +14,7 @@
+ * Defines x86 CPU feature bits
+ */
+ #define NCAPINTS 20 /* N 32-bit words worth of info */
+-#define NBUGINTS 1 /* N 32-bit bug flags */
++#define NBUGINTS 2 /* N 32-bit bug flags */
+
+ /*
+ * Note: If the comment begins with a quoted string, that string is used
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -14,7 +14,7 @@
+ * Defines x86 CPU feature bits
+ */
+ #define NCAPINTS 20 /* N 32-bit words worth of info */
+-#define NBUGINTS 1 /* N 32-bit bug flags */
++#define NBUGINTS 2 /* N 32-bit bug flags */
+
+ /*
+ * Note: If the comment begins with a quoted string, that string is used
--- /dev/null
+From 69372cf01290b9587d2cee8fbe161d75d55c3adc Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 10 Dec 2020 11:09:36 -0600
+Subject: x86/cpu: Add VM page flush MSR availability as a CPUID feature
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 69372cf01290b9587d2cee8fbe161d75d55c3adc upstream.
+
+On systems that do not have hardware enforced cache coherency between
+encrypted and unencrypted mappings of the same physical page, the
+hypervisor can use the VM page flush MSR (0xc001011e) to flush the cache
+contents of an SEV guest page. When a small number of pages are being
+flushed, this can be used in place of issuing a WBINVD across all CPUs.
+
+CPUID 0x8000001f_eax[2] is used to determine if the VM page flush MSR is
+available. Add a CPUID feature to indicate it is supported and define the
+MSR.
+
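+A hedged sketch of the intended use (the helper name is hypothetical and the
+flush protocol itself is specified by the AMD APM, so it is not shown):
+
+  static void sev_flush_page_range(void *va, unsigned long npages)
+  {
+          /* Prefer per-page flushing when only a few SEV guest pages
+           * change state; otherwise a cross-CPU WBINVD is required. */
+          if (!boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
+                  wbinvd_on_all_cpus();
+                  return;
+          }
+          /* Per-page MSR_AMD64_VM_PAGE_FLUSH writes would go here; the
+           * exact operand format is defined by the AMD APM. */
+  }
+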
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Message-Id: <f1966379e31f9b208db5257509c4a089a87d33d0.1607620209.git.thomas.lendacky@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/msr-index.h | 1 +
+ arch/x86/kernel/cpu/scattered.c | 1 +
+ 3 files changed, 3 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -236,6 +236,7 @@
+ #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
+ #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+ #define X86_FEATURE_SEV_ES ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */
++#define X86_FEATURE_VM_PAGE_FLUSH ( 8*32+21) /* "" VM Page Flush MSR is supported */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
+ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -495,6 +495,7 @@
+ #define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
+ #define MSR_AMD64_IBSOPDATA4 0xc001103d
+ #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
++#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
+ #define MSR_AMD64_SEV 0xc0010131
+ #define MSR_AMD64_SEV_ENABLED_BIT 0
+ #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -44,6 +44,7 @@ static const struct cpuid_bit cpuid_bits
+ { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
+ { X86_FEATURE_SEV_ES, CPUID_EAX, 3, 0x8000001f, 0 },
+ { X86_FEATURE_SME_COHERENT, CPUID_EAX, 10, 0x8000001f, 0 },
++ { X86_FEATURE_VM_PAGE_FLUSH, CPUID_EAX, 2, 0x8000001f, 0 },
+ { 0, 0, 0, 0, 0 }
+ };
+
--- /dev/null
+From 25633de10d576ba7b91d35fc74095b90b2bb8bc8 Mon Sep 17 00:00:00 2001
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Tue, 10 Jan 2023 16:46:37 -0600
+Subject: x86/cpu, kvm: Add support for CPUID_80000021_EAX
+
+From: Kim Phillips <kim.phillips@amd.com>
+
+commit 8415a74852d7c24795007ee9862d25feb519007c upstream.
+
+Add support for CPUID leaf 80000021, EAX. The majority of the features will be
+used in the kernel and thus a separate leaf is appropriate.
+
+Include KVM's reverse_cpuid entry because features are used by VM guests, too.
+
+ [ bp: Massage commit message. ]
+
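+As a hedged sketch, a bit in the new word would be declared and tested like
+any other feature (the bit below is purely hypothetical):
+
+  /* arch/x86/include/asm/cpufeatures.h; word 20 == CPUID_8000_0021_EAX */
+  #define X86_FEATURE_EXAMPLE_BIT (20*32 + 0) /* hypothetical */
+
+  if (cpu_feature_enabled(X86_FEATURE_EXAMPLE_BIT))
+          pr_info("CPUID 0x80000021 EAX bit 0 present\n");
+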
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Sean Christopherson <seanjc@google.com>
+Link: https://lore.kernel.org/r/20230124163319.2277355-2-kim.phillips@amd.com
+[bwh: Backported to 6.1: adjust context]
+Signed-off-by: Ben Hutchings <benh@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeature.h | 7 +++++--
+ arch/x86/include/asm/cpufeatures.h | 2 +-
+ arch/x86/include/asm/disabled-features.h | 3 ++-
+ arch/x86/include/asm/required-features.h | 3 ++-
+ arch/x86/kernel/cpu/common.c | 3 +++
+ arch/x86/kvm/cpuid.h | 1 +
+ 6 files changed, 14 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -31,6 +31,7 @@ enum cpuid_leafs
+ CPUID_8000_0007_EBX,
+ CPUID_7_EDX,
+ CPUID_8000_001F_EAX,
++ CPUID_8000_0021_EAX,
+ };
+
+ #ifdef CONFIG_X86_FEATURE_NAMES
+@@ -90,8 +91,9 @@ extern const char * const x86_bug_flags[
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
+ REQUIRED_MASK_CHECK || \
+- BUILD_BUG_ON_ZERO(NCAPINTS != 20))
++ BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+
+ #define DISABLED_MASK_BIT_SET(feature_bit) \
+ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
+@@ -114,8 +116,9 @@ extern const char * const x86_bug_flags[
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
+ DISABLED_MASK_CHECK || \
+- BUILD_BUG_ON_ZERO(NCAPINTS != 20))
++ BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+
+ #define cpu_has(c, bit) \
+ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -13,7 +13,7 @@
+ /*
+ * Defines x86 CPU feature bits
+ */
+-#define NCAPINTS 20 /* N 32-bit words worth of info */
++#define NCAPINTS 21 /* N 32-bit words worth of info */
+ #define NBUGINTS 2 /* N 32-bit bug flags */
+
+ /*
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -85,6 +85,7 @@
+ #define DISABLED_MASK17 0
+ #define DISABLED_MASK18 0
+ #define DISABLED_MASK19 0
+-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
++#define DISABLED_MASK20 0
++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+
+ #endif /* _ASM_X86_DISABLED_FEATURES_H */
+--- a/arch/x86/include/asm/required-features.h
++++ b/arch/x86/include/asm/required-features.h
+@@ -102,6 +102,7 @@
+ #define REQUIRED_MASK17 0
+ #define REQUIRED_MASK18 0
+ #define REQUIRED_MASK19 0
+-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
++#define REQUIRED_MASK20 0
++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+
+ #endif /* _ASM_X86_REQUIRED_FEATURES_H */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -970,6 +970,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+ if (c->extended_cpuid_level >= 0x8000001f)
+ c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
+
++ if (c->extended_cpuid_level >= 0x80000021)
++ c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
++
+ init_scattered_cpuid_features(c);
+ init_speculation_control(c);
+ init_cqm(c);
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -53,6 +53,7 @@ static const struct cpuid_reg reverse_cp
+ [CPUID_7_ECX] = { 7, 0, CPUID_ECX},
+ [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
+ [CPUID_7_EDX] = { 7, 0, CPUID_EDX},
++ [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
+ };
+
+ static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
--- /dev/null
+From b3454eb2d26a6cecada04b38e72e255ae702ccdb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:24 +0200
+Subject: x86/cpu: Switch to arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 7c7077a72674402654f3291354720cd73cdf649e upstream
+
+check_bugs() is a dumping ground for finalizing the CPU bringup. Only parts
+of it have to do with actual CPU bugs.
+
+Split it apart into arch_cpu_finalize_init() and cpu_select_mitigations().
+
+Fixup the bogus 32bit comments while at it.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230613224545.019583869@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig | 1
+ arch/x86/include/asm/bugs.h | 2 -
+ arch/x86/kernel/cpu/bugs.c | 51 ----------------------------------------
+ arch/x86/kernel/cpu/common.c | 54 +++++++++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/cpu.h | 1
+ 5 files changed, 57 insertions(+), 52 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -60,6 +60,7 @@ config X86
+ select ARCH_CLOCKSOURCE_DATA
+ select ARCH_CLOCKSOURCE_INIT
+ select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
++ select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_ELF_RANDOMIZE
+--- a/arch/x86/include/asm/bugs.h
++++ b/arch/x86/include/asm/bugs.h
+@@ -4,8 +4,6 @@
+
+ #include <asm/processor.h>
+
+-extern void check_bugs(void);
+-
+ #if defined(CONFIG_CPU_SUP_INTEL)
+ void check_mpx_erratum(struct cpuinfo_x86 *c);
+ #else
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -9,7 +9,6 @@
+ * - Andrew D. Balsa (code cleanup).
+ */
+ #include <linux/init.h>
+-#include <linux/utsname.h>
+ #include <linux/cpu.h>
+ #include <linux/module.h>
+ #include <linux/nospec.h>
+@@ -25,9 +24,7 @@
+ #include <asm/msr.h>
+ #include <asm/vmx.h>
+ #include <asm/paravirt.h>
+-#include <asm/alternative.h>
+ #include <asm/pgtable.h>
+-#include <asm/set_memory.h>
+ #include <asm/intel-family.h>
+ #include <asm/e820/api.h>
+ #include <asm/hypervisor.h>
+@@ -115,21 +112,8 @@ EXPORT_SYMBOL_GPL(mds_idle_clear);
+ DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+ EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
+
+-void __init check_bugs(void)
++void __init cpu_select_mitigations(void)
+ {
+- identify_boot_cpu();
+-
+- /*
+- * identify_boot_cpu() initialized SMT support information, let the
+- * core code know.
+- */
+- cpu_smt_check_topology();
+-
+- if (!IS_ENABLED(CONFIG_SMP)) {
+- pr_info("CPU: ");
+- print_cpu_info(&boot_cpu_data);
+- }
+-
+ /*
+ * Read the SPEC_CTRL MSR to account for reserved bits which may
+ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+@@ -165,39 +149,6 @@ void __init check_bugs(void)
+ l1tf_select_mitigation();
+ md_clear_select_mitigation();
+ srbds_select_mitigation();
+-
+- arch_smt_update();
+-
+-#ifdef CONFIG_X86_32
+- /*
+- * Check whether we are able to run this kernel safely on SMP.
+- *
+- * - i386 is no longer supported.
+- * - In order to run on anything without a TSC, we need to be
+- * compiled for a i486.
+- */
+- if (boot_cpu_data.x86 < 4)
+- panic("Kernel requires i486+ for 'invlpg' and other features");
+-
+- init_utsname()->machine[1] =
+- '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+- alternative_instructions();
+-
+- fpu__init_check_bugs();
+-#else /* CONFIG_X86_64 */
+- alternative_instructions();
+-
+- /*
+- * Make sure the first 2MB area is not mapped by huge pages
+- * There are typically fixed size MTRRs in there and overlapping
+- * MTRRs into large pages causes slow downs.
+- *
+- * Right now we don't do that with gbpages because there seems
+- * very little benefit for that case.
+- */
+- if (!direct_gbpages)
+- set_memory_4k((unsigned long)__va(0), 1);
+-#endif
+ }
+
+ /*
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -18,10 +18,14 @@
+ #include <linux/kprobes.h>
+ #include <linux/kgdb.h>
+ #include <linux/smp.h>
++#include <linux/cpu.h>
+ #include <linux/io.h>
+ #include <linux/syscore_ops.h>
+
+ #include <asm/stackprotector.h>
++#include <linux/utsname.h>
++
++#include <asm/alternative.h>
+ #include <asm/perf_event.h>
+ #include <asm/mmu_context.h>
+ #include <asm/archrandom.h>
+@@ -57,6 +61,7 @@
+ #ifdef CONFIG_X86_LOCAL_APIC
+ #include <asm/uv/uv.h>
+ #endif
++#include <asm/set_memory.h>
+
+ #include "cpu.h"
+
+@@ -2156,3 +2161,52 @@ void arch_smt_update(void)
+ /* Check whether IPI broadcasting can be enabled */
+ apic_smt_update();
+ }
++
++void __init arch_cpu_finalize_init(void)
++{
++ identify_boot_cpu();
++
++ /*
++ * identify_boot_cpu() initialized SMT support information, let the
++ * core code know.
++ */
++ cpu_smt_check_topology();
++
++ if (!IS_ENABLED(CONFIG_SMP)) {
++ pr_info("CPU: ");
++ print_cpu_info(&boot_cpu_data);
++ }
++
++ cpu_select_mitigations();
++
++ arch_smt_update();
++
++ if (IS_ENABLED(CONFIG_X86_32)) {
++ /*
++		 * Check whether this is a real i386 which is no longer
++ * supported and fixup the utsname.
++ */
++ if (boot_cpu_data.x86 < 4)
++ panic("Kernel requires i486+ for 'invlpg' and other features");
++
++ init_utsname()->machine[1] =
++ '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
++ }
++
++ alternative_instructions();
++
++ if (IS_ENABLED(CONFIG_X86_64)) {
++ /*
++ * Make sure the first 2MB area is not mapped by huge pages
++ * There are typically fixed size MTRRs in there and overlapping
++ * MTRRs into large pages causes slow downs.
++ *
++ * Right now we don't do that with gbpages because there seems
++ * very little benefit for that case.
++ */
++ if (!direct_gbpages)
++ set_memory_4k((unsigned long)__va(0), 1);
++ } else {
++ fpu__init_check_bugs();
++ }
++}
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -76,6 +76,7 @@ extern void detect_ht(struct cpuinfo_x86
+ extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);
+
+ unsigned int aperfmperf_get_khz(int cpu);
++void cpu_select_mitigations(void);
+
+ extern void x86_spec_ctrl_setup_ap(void);
+ extern void update_srbds_msr(void);
--- /dev/null
+From 360e7c5c4ca4fd8e627781ed42f95d58bc3bb732 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 7 Sep 2020 15:15:06 +0200
+Subject: x86/cpufeatures: Add SEV-ES CPU feature
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 360e7c5c4ca4fd8e627781ed42f95d58bc3bb732 upstream.
+
+Add CPU feature detection for Secure Encrypted Virtualization with
+Encrypted State. This feature enhances SEV by also encrypting the
+guest register state, making it inaccessible to the hypervisor.
+
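+As a minimal illustration (not part of this patch), kernel code tests
+the new flag like any other feature bit:
+
+    /* sketch only: guarded use of the new SEV-ES flag */
+    if (boot_cpu_has(X86_FEATURE_SEV_ES))
+        pr_info("SEV-ES is supported by this CPU\n");
+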
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lkml.kernel.org/r/20200907131613.12703-6-joro@8bytes.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/amd.c | 3 ++-
+ arch/x86/kernel/cpu/scattered.c | 1 +
+ 3 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -235,6 +235,7 @@
+ #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
+ #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
+ #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
++#define X86_FEATURE_SEV_ES ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
+ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -663,7 +663,7 @@ static void early_detect_mem_encrypt(str
+ * If BIOS has not enabled SME then don't advertise the
+ * SME feature (set in scattered.c).
+ * For SEV: If BIOS has not enabled SEV then don't advertise the
+- * SEV feature (set in scattered.c).
++ * SEV and SEV_ES feature (set in scattered.c).
+ *
+ * In all cases, since support for SME and SEV requires long mode,
+ * don't advertise the feature under CONFIG_X86_32.
+@@ -694,6 +694,7 @@ clear_all:
+ setup_clear_cpu_cap(X86_FEATURE_SME);
+ clear_sev:
+ setup_clear_cpu_cap(X86_FEATURE_SEV);
++ setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
+ }
+ }
+
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -42,6 +42,7 @@ static const struct cpuid_bit cpuid_bits
+ { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+ { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
+ { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
++ { X86_FEATURE_SEV_ES, CPUID_EAX, 3, 0x8000001f, 0 },
+ { X86_FEATURE_SME_COHERENT, CPUID_EAX, 10, 0x8000001f, 0 },
+ { 0, 0, 0, 0, 0 }
+ };
--- /dev/null
+From fb35d30fe5b06cc24444f0405da8fbe0be5330d1 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 22 Jan 2021 12:40:46 -0800
+Subject: x86/cpufeatures: Assign dedicated feature word for CPUID_0x8000001F[EAX]
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit fb35d30fe5b06cc24444f0405da8fbe0be5330d1 upstream.
+
+Collect the scattered SME/SEV related feature flags into a dedicated
+word. There are now five recognized features in CPUID.0x8000001F.EAX,
+with at least one more on the horizon (SEV-SNP). Using a dedicated word
+allows KVM to use its automagic CPUID adjustment logic when reporting
+the set of supported features to userspace.
+
+No functional change intended.
+
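+For illustration, capability words are just u32 slots in
+cpuinfo_x86.x86_capability[], and a flag defined as (word*32 + bit) is
+tested roughly like this (sketch, not the exact cpu_has() macro):
+
+    /* word 19 is filled from CPUID.0x8000001F:EAX in get_cpu_cap() */
+    bool sev_es = c->x86_capability[CPUID_8000_001F_EAX] & BIT(3);
+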
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Brijesh Singh <brijesh.singh@amd.com>
+Link: https://lkml.kernel.org/r/20210122204047.2860075-2-seanjc@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeature.h | 7 +++++--
+ arch/x86/include/asm/cpufeatures.h | 17 +++++++++++------
+ arch/x86/include/asm/disabled-features.h | 3 ++-
+ arch/x86/include/asm/required-features.h | 3 ++-
+ arch/x86/kernel/cpu/common.c | 3 +++
+ arch/x86/kernel/cpu/scattered.c | 5 -----
+ tools/arch/x86/include/asm/disabled-features.h | 3 ++-
+ tools/arch/x86/include/asm/required-features.h | 3 ++-
+ 8 files changed, 27 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -30,6 +30,7 @@ enum cpuid_leafs
+ CPUID_7_ECX,
+ CPUID_8000_0007_EBX,
+ CPUID_7_EDX,
++ CPUID_8000_001F_EAX,
+ };
+
+ #ifdef CONFIG_X86_FEATURE_NAMES
+@@ -88,8 +89,9 @@ extern const char * const x86_bug_flags[
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
+ REQUIRED_MASK_CHECK || \
+- BUILD_BUG_ON_ZERO(NCAPINTS != 19))
++ BUILD_BUG_ON_ZERO(NCAPINTS != 20))
+
+ #define DISABLED_MASK_BIT_SET(feature_bit) \
+ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
+@@ -111,8 +113,9 @@ extern const char * const x86_bug_flags[
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
+ DISABLED_MASK_CHECK || \
+- BUILD_BUG_ON_ZERO(NCAPINTS != 19))
++ BUILD_BUG_ON_ZERO(NCAPINTS != 20))
+
+ #define cpu_has(c, bit) \
+ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -13,7 +13,7 @@
+ /*
+ * Defines x86 CPU feature bits
+ */
+-#define NCAPINTS 19 /* N 32-bit words worth of info */
++#define NCAPINTS 20 /* N 32-bit words worth of info */
+ #define NBUGINTS 1 /* N 32-bit bug flags */
+
+ /*
+@@ -96,7 +96,7 @@
+ #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
+ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
+ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
+-#define X86_FEATURE_SME_COHERENT ( 3*32+17) /* "" AMD hardware-enforced cache coherency */
++/* FREE! ( 3*32+17) */
+ #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
+ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
+ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+@@ -201,7 +201,7 @@
+ #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
+ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
++/* FREE! ( 7*32+10) */
+ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
+ #define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+ #define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
+@@ -211,7 +211,7 @@
+ #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+-#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
++/* FREE! ( 7*32+20) */
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+@@ -235,8 +235,6 @@
+ #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
+ #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
+ #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+-#define X86_FEATURE_SEV_ES ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */
+-#define X86_FEATURE_VM_PAGE_FLUSH ( 8*32+21) /* "" VM Page Flush MSR is supported */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
+ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
+@@ -377,6 +375,13 @@
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+ #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
++/* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
++#define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */
++#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
++#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
++#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
++#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
++
+ /*
+ * BUG word(s)
+ */
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -84,6 +84,7 @@
+ #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
+ #define DISABLED_MASK17 0
+ #define DISABLED_MASK18 0
+-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
++#define DISABLED_MASK19 0
++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+
+ #endif /* _ASM_X86_DISABLED_FEATURES_H */
+--- a/arch/x86/include/asm/required-features.h
++++ b/arch/x86/include/asm/required-features.h
+@@ -101,6 +101,7 @@
+ #define REQUIRED_MASK16 0
+ #define REQUIRED_MASK17 0
+ #define REQUIRED_MASK18 0
+-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
++#define REQUIRED_MASK19 0
++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+
+ #endif /* _ASM_X86_REQUIRED_FEATURES_H */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -967,6 +967,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+ if (c->extended_cpuid_level >= 0x8000000a)
+ c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
+
++ if (c->extended_cpuid_level >= 0x8000001f)
++ c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
++
+ init_scattered_cpuid_features(c);
+ init_speculation_control(c);
+ init_cqm(c);
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -40,11 +40,6 @@ static const struct cpuid_bit cpuid_bits
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+ { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+- { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
+- { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
+- { X86_FEATURE_SEV_ES, CPUID_EAX, 3, 0x8000001f, 0 },
+- { X86_FEATURE_SME_COHERENT, CPUID_EAX, 10, 0x8000001f, 0 },
+- { X86_FEATURE_VM_PAGE_FLUSH, CPUID_EAX, 2, 0x8000001f, 0 },
+ { 0, 0, 0, 0, 0 }
+ };
+
+--- a/tools/arch/x86/include/asm/disabled-features.h
++++ b/tools/arch/x86/include/asm/disabled-features.h
+@@ -84,6 +84,7 @@
+ #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
+ #define DISABLED_MASK17 0
+ #define DISABLED_MASK18 0
+-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
++#define DISABLED_MASK19 0
++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+
+ #endif /* _ASM_X86_DISABLED_FEATURES_H */
+--- a/tools/arch/x86/include/asm/required-features.h
++++ b/tools/arch/x86/include/asm/required-features.h
+@@ -101,6 +101,7 @@
+ #define REQUIRED_MASK16 0
+ #define REQUIRED_MASK17 0
+ #define REQUIRED_MASK18 0
+-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
++#define REQUIRED_MASK19 0
++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+
+ #endif /* _ASM_X86_REQUIRED_FEATURES_H */
--- /dev/null
+From foo@baz Tue Aug 8 07:20:48 PM CEST 2023
+Date: Tue, 08 Aug 2023 19:20:48 +0200
+To: Greg KH <gregkh@linuxfoundation.org>
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Subject: x86: fix backwards merge of GDS/SRSO bit
+
+Stable-tree-only change.
+
+Due to the way the GDS and SRSO patches flowed into the stable tree,
+there was a 50% chance that the merge would pick the wrong bit values
+for GDS and SRSO. Of course, I lost that bet, and chose the opposite of
+what Linus chose in commit 64094e7e3118 ("Merge tag 'gds-for-linus-2023-08-01' of
+git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip")
+
+Fix this up by switching the values to match what is now in Linus's tree,
+as those are the correct values to mirror.
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1135,8 +1135,12 @@ static const __initconst struct x86_cpu_
+ #define MMIO_SBDS BIT(2)
+ /* CPU is affected by RETbleed, speculating where you would not expect it */
+ #define RETBLEED BIT(3)
++/* CPU is affected by SMT (cross-thread) return predictions */
++#define SMT_RSB BIT(4)
++/* CPU is affected by SRSO */
++#define SRSO BIT(5)
+ /* CPU is affected by GDS */
+-#define GDS BIT(4)
++#define GDS BIT(6)
+
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
--- /dev/null
+From 677d1e9bb0bff552b161e9058f1b6fdfd88ece91 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:45 +0200
+Subject: x86/fpu: Mark init functions __init
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 1703db2b90c91b2eb2d699519fc505fe431dde0e upstream
+
+No point in keeping them around.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230613224545.841685728@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/init.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -50,7 +50,7 @@ void fpu__init_cpu(void)
+ fpu__init_cpu_xstate();
+ }
+
+-static bool fpu__probe_without_cpuid(void)
++static bool __init fpu__probe_without_cpuid(void)
+ {
+ unsigned long cr0;
+ u16 fsw, fcw;
+@@ -68,7 +68,7 @@ static bool fpu__probe_without_cpuid(voi
+ return fsw == 0 && (fcw & 0x103f) == 0x003f;
+ }
+
+-static void fpu__init_system_early_generic(void)
++static void __init fpu__init_system_early_generic(void)
+ {
+ if (!boot_cpu_has(X86_FEATURE_CPUID) &&
+ !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
--- /dev/null
+From 8a3b312da29169625141ff9c984a796724240ac1 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:46 +0200
+Subject: x86/fpu: Move FPU initialization into arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit b81fac906a8f9e682e513ddd95697ec7a20878d4 upstream
+
+Initializing the FPU during the early boot process is a pointless
+exercise. Early boot is convoluted and fragile enough.
+
+Nothing requires that the FPU is set up early. It has to be initialized
+before fork_init() because the task_struct size depends on the FPU register
+buffer size.
+
+Move the initialization to arch_cpu_finalize_init() which is the perfect
+place to do so.
+
+No functional change.
+
+This allows removing quite a bit of the custom early command-line
+parsing, but that's the subject of the next installment.
+
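+For context, the size dependency works roughly like this (condensed
+sketch; helper names as in this kernel's FPU code):
+
+    fpu__init_system()
+        -> fpu__init_task_struct_size()  /* sets arch_task_struct_size */
+    ...
+    fork_init()  /* sizes the task_struct slab from arch_task_struct_size */
+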
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230613224545.902376621@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c | 13 +++++++------
+ arch/x86/kernel/smpboot.c | 1 +
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1364,8 +1364,6 @@ static void __init early_identify_cpu(st
+
+ cpu_set_bug_bits(c);
+
+- fpu__init_system();
+-
+ #ifdef CONFIG_X86_32
+ /*
+ * Regardless of whether PCID is enumerated, the SDM says
+@@ -2055,8 +2053,6 @@ void cpu_init(void)
+ clear_all_debug_regs();
+ dbg_restore_debug_regs();
+
+- fpu__init_cpu();
+-
+ if (is_uv_system())
+ uv_cpu_init();
+
+@@ -2114,8 +2110,6 @@ void cpu_init(void)
+ clear_all_debug_regs();
+ dbg_restore_debug_regs();
+
+- fpu__init_cpu();
+-
+ load_fixmap_gdt(cpu);
+ }
+ #endif
+@@ -2194,6 +2188,13 @@ void __init arch_cpu_finalize_init(void)
+ '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+ }
+
++ /*
++ * Must be before alternatives because it might set or clear
++ * feature bits.
++ */
++ fpu__init_system();
++ fpu__init_cpu();
++
+ alternative_instructions();
+
+ if (IS_ENABLED(CONFIG_X86_64)) {
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -235,6 +235,7 @@ static void notrace start_secondary(void
+ #endif
+ load_current_idt();
+ cpu_init();
++ fpu__init_cpu();
+ rcu_cpu_starting(raw_smp_processor_id());
+ x86_cpuinit.early_percpu_clock_init();
+ preempt_disable();
--- /dev/null
+From a03b110dad183d18d01f0f8e370228c2747133e6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:43 +0200
+Subject: x86/fpu: Remove cpuinfo argument from init functions
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 1f34bb2a24643e0087652d81078e4f616562738d upstream
+
+Nothing in the call chain requires it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230613224545.783704297@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/fpu/internal.h | 2 +-
+ arch/x86/kernel/cpu/common.c | 2 +-
+ arch/x86/kernel/fpu/init.c | 6 +++---
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -41,7 +41,7 @@ extern int dump_fpu(struct pt_regs *ptr
+ extern void fpu__init_cpu(void);
+ extern void fpu__init_system_xstate(void);
+ extern void fpu__init_cpu_xstate(void);
+-extern void fpu__init_system(struct cpuinfo_x86 *c);
++extern void fpu__init_system(void);
+ extern void fpu__init_check_bugs(void);
+ extern void fpu__resume_cpu(void);
+ extern u64 fpu__get_supported_xfeatures_mask(void);
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1364,7 +1364,7 @@ static void __init early_identify_cpu(st
+
+ cpu_set_bug_bits(c);
+
+- fpu__init_system(c);
++ fpu__init_system();
+
+ #ifdef CONFIG_X86_32
+ /*
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -68,7 +68,7 @@ static bool fpu__probe_without_cpuid(voi
+ return fsw == 0 && (fcw & 0x103f) == 0x003f;
+ }
+
+-static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
++static void fpu__init_system_early_generic(void)
+ {
+ if (!boot_cpu_has(X86_FEATURE_CPUID) &&
+ !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
+@@ -290,10 +290,10 @@ static void __init fpu__init_parse_early
+ * Called on the boot CPU once per system bootup, to set up the initial
+ * FPU state that is later cloned into all processes:
+ */
+-void __init fpu__init_system(struct cpuinfo_x86 *c)
++void __init fpu__init_system(void)
+ {
+ fpu__init_parse_early_param();
+- fpu__init_system_early_generic(c);
++ fpu__init_system_early_generic();
+
+ /*
+ * The FPU has to be operational for some of the
--- /dev/null
+From 26ce6ec364f18d2915923bc05784084e54a5c4cc Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 9 Jan 2023 16:09:22 +0100
+Subject: x86/mm: fix poking_init() for Xen PV guests
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 26ce6ec364f18d2915923bc05784084e54a5c4cc upstream.
+
+Commit 3f4c8211d982 ("x86/mm: Use mm_alloc() in poking_init()") broke
+the kernel for running as Xen PV guest.
+
+It seems as if the new address space is never activated before being
+used, resulting in Xen refusing to accept the new CR3 value (the PGD
+isn't pinned).
+
+Fix that by adding the now missing call of paravirt_arch_dup_mmap() to
+poking_init(). That call was previously made by dup_mm()->dup_mmap(); it
+is a NOP in all cases except Xen PV, where it just pins the PGD.
+
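+For reference, a rough sketch of what the pv hook resolves to on Xen PV
+(simplified from this kernel's Xen mmu code):
+
+    static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+    {
+        spin_lock(&mm->page_table_lock);
+        xen_pgd_pin(mm);    /* the pinning poking_init() was missing */
+        spin_unlock(&mm->page_table_lock);
+    }
+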
+Fixes: 3f4c8211d982 ("x86/mm: Use mm_alloc() in poking_init()")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230109150922.10578-1-jgross@suse.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/init.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -26,6 +26,7 @@
+ #include <asm/cpufeature.h>
+ #include <asm/pti.h>
+ #include <asm/text-patching.h>
++#include <asm/paravirt.h>
+
+ /*
+ * We need to define the tracepoints somewhere, and tlb.c
+@@ -738,6 +739,9 @@ void __init poking_init(void)
+ poking_mm = copy_init_mm();
+ BUG_ON(!poking_mm);
+
++ /* Xen PV guests need the PGD to be pinned. */
++ paravirt_arch_dup_mmap(NULL, poking_mm);
++
+ /*
+ * Randomize the poking address, but make sure that the following page
+ * will be mapped at the same PMD. We need 2 pages, so find space for 3,
--- /dev/null
+From 5b93a83649c7cba3a15eb7e8959b250841acb1b1 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 25 Oct 2022 21:38:25 +0200
+Subject: x86/mm: Initialize text poking earlier
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 5b93a83649c7cba3a15eb7e8959b250841acb1b1 upstream.
+
+Move poking_init() up a bunch; specifically move it right after
+mm_init() which is right before ftrace_init().
+
+This will allow simplifying ftrace text poking which currently has
+a bunch of exceptions for early boot.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20221025201057.881703081@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/main.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -624,7 +624,7 @@ asmlinkage __visible void __init start_k
+ sort_main_extable();
+ trap_init();
+ mm_init();
+-
++ poking_init();
+ ftrace_init();
+
+ /* trace_printk can be enabled here */
+@@ -763,8 +763,6 @@ asmlinkage __visible void __init start_k
+ taskstats_init_early();
+ delayacct_init();
+
+- poking_init();
+-
+ acpi_subsystem_init();
+ arch_post_acpi_subsys_init();
+ sfi_init_late();
--- /dev/null
+From 3f4c8211d982099be693be9aa7d6fc4607dff290 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 25 Oct 2022 21:38:21 +0200
+Subject: x86/mm: Use mm_alloc() in poking_init()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 3f4c8211d982099be693be9aa7d6fc4607dff290 upstream.
+
+Instead of duplicating init_mm, allocate a fresh mm. The advantage is
+that mm_alloc() has much simpler dependencies. Additionally it makes
+more conceptual sense: init_mm has no (and must not have) user state
+to duplicate.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20221025201057.816175235@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/init.c | 3 ++-
+ include/linux/sched/task.h | 1 -
+ kernel/fork.c | 5 -----
+ 3 files changed, 2 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -7,6 +7,7 @@
+ #include <linux/swapops.h>
+ #include <linux/kmemleak.h>
+ #include <linux/sched/task.h>
++#include <linux/sched/mm.h>
+
+ #include <asm/set_memory.h>
+ #include <asm/cpu_device_id.h>
+@@ -736,7 +737,7 @@ void __init poking_init(void)
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+- poking_mm = copy_init_mm();
++ poking_mm = mm_alloc();
+ BUG_ON(!poking_mm);
+
+ /* Xen PV guests need the PGD to be pinned. */
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -93,7 +93,6 @@ extern long _do_fork(struct kernel_clone
+ extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs);
+ extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
+ struct task_struct *fork_idle(int);
+-struct mm_struct *copy_init_mm(void);
+ extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+ extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2335,11 +2335,6 @@ struct task_struct *fork_idle(int cpu)
+ return task;
+ }
+
+-struct mm_struct *copy_init_mm(void)
+-{
+- return dup_mm(NULL, &init_mm);
+-}
+-
+ /*
+ * Ok, this is the main fork-routine.
+ *
--- /dev/null
+From ead252286b6800873dd961075a36939f15e9b163 Mon Sep 17 00:00:00 2001
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Date: Wed, 12 Jul 2023 19:43:12 -0700
+Subject: x86/speculation: Add force option to GDS mitigation
+
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+
+commit 553a5c03e90a6087e88f8ff878335ef0621536fb upstream
+
+The Gather Data Sampling (GDS) vulnerability allows malicious software
+to infer stale data previously stored in vector registers. This may
+include sensitive data such as cryptographic keys. GDS is mitigated in
+microcode, and systems with up-to-date microcode are protected by
+default. However, any affected system that is running with older
+microcode will still be vulnerable to GDS attacks.
+
+Since the gather instructions used by the attacker are part of the
+AVX2 and AVX512 extensions, disabling these extensions prevents gather
+instructions from being executed, thereby mitigating GDS. Disabling
+AVX2 alone would be sufficient, but there is no control with that
+granularity: clearing XCR0[2] disables all of AVX, with no option to
+disable just AVX2.
+
+Add a kernel parameter gather_data_sampling=force that will enable the
+microcode mitigation if available; otherwise, it will disable AVX on
+affected systems.
+
+This option will be ignored if mitigations=off is given on the kernel
+command line.
+
+This is a *big* hammer. It is known to break userspace that relies on
+incomplete, buggy AVX enumeration. Unfortunately, such userspace does
+exist in the wild:
+
+ https://www.mail-archive.com/bug-coreutils@gnu.org/msg33046.html
+
+[ dhansen: add some more ominous warnings about disabling AVX ]
+
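+For reference, correct userspace enumeration has to check both the AVX
+CPUID bit and OS-enabled YMM state, roughly like this (illustrative
+userspace C, not from this patch):
+
+    #include <cpuid.h>
+    #include <stdbool.h>
+
+    static bool avx_usable(void)
+    {
+        unsigned int eax, ebx, ecx, edx;
+
+        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
+            return false;
+        /* need OSXSAVE (bit 27) and AVX (bit 28) */
+        if (!(ecx & (1u << 27)) || !(ecx & (1u << 28)))
+            return false;
+        /* XCR0 must have both XMM and YMM state enabled */
+        asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (0));
+        return (eax & 0x6) == 0x6;
+    }
+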
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/gather_data_sampling.rst | 18 +++++++++--
+ Documentation/admin-guide/kernel-parameters.txt | 8 ++++-
+ arch/x86/kernel/cpu/bugs.c | 20 ++++++++++++-
+ 3 files changed, 40 insertions(+), 6 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
++++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
+@@ -60,14 +60,21 @@ bits:
+ ================================ === ============================
+
+ GDS can also be mitigated on systems that don't have updated microcode by
+-disabling AVX. This can be done by setting "clearcpuid=avx" on the kernel
+-command-line.
++disabling AVX. This can be done by setting gather_data_sampling="force" or
++"clearcpuid=avx" on the kernel command-line.
++
++If used, these options will disable AVX use by turning on XSAVE YMM support.
++However, the processor will still enumerate AVX support. Userspace that
++does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM
++support will break.
+
+ Mitigation control on the kernel command line
+ ---------------------------------------------
+ The mitigation can be disabled by setting "gather_data_sampling=off" or
+-"mitigations=off" on the kernel command line. Not specifying either will
+-default to the mitigation being enabled.
++"mitigations=off" on the kernel command line. Not specifying either will default
++to the mitigation being enabled. Specifying "gather_data_sampling=force" will
++use the microcode mitigation when available or disable AVX on affected systems
++where the microcode hasn't been updated to include the mitigation.
+
+ GDS System Information
+ ------------------------
+@@ -83,6 +90,9 @@ The possible values contained in this fi
+ Vulnerable Processor vulnerable and mitigation disabled.
+ Vulnerable: No microcode Processor vulnerable and microcode is missing
+ mitigation.
++ Mitigation: AVX disabled,
++ no microcode Processor is vulnerable and microcode is missing
++ mitigation. AVX disabled as mitigation.
+ Mitigation: Microcode Processor is vulnerable and mitigation is in
+ effect.
+ Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1346,7 +1346,13 @@
+
+ This issue is mitigated by default in updated microcode.
+ The mitigation may have a performance impact but can be
+- disabled.
++ disabled. On systems without the microcode mitigation
++ disabling AVX serves as a mitigation.
++
++ force: Disable AVX to mitigate systems without
++ microcode mitigation. No effect if the microcode
++ mitigation is present. Known to cause crashes in
++ userspace with buggy AVX enumeration.
+
+ off: Disable GDS mitigation.
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -607,6 +607,7 @@ early_param("srbds", srbds_parse_cmdline
+ enum gds_mitigations {
+ GDS_MITIGATION_OFF,
+ GDS_MITIGATION_UCODE_NEEDED,
++ GDS_MITIGATION_FORCE,
+ GDS_MITIGATION_FULL,
+ GDS_MITIGATION_FULL_LOCKED,
+ GDS_MITIGATION_HYPERVISOR,
+@@ -617,6 +618,7 @@ static enum gds_mitigations gds_mitigati
+ static const char * const gds_strings[] = {
+ [GDS_MITIGATION_OFF] = "Vulnerable",
+ [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
++ [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
+ [GDS_MITIGATION_FULL] = "Mitigation: Microcode",
+ [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
+ [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
+@@ -642,6 +644,7 @@ void update_gds_msr(void)
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ mcu_ctrl &= ~GDS_MITG_DIS;
+ break;
++ case GDS_MITIGATION_FORCE:
+ case GDS_MITIGATION_UCODE_NEEDED:
+ case GDS_MITIGATION_HYPERVISOR:
+ return;
+@@ -676,10 +679,23 @@ static void __init gds_select_mitigation
+
+ /* No microcode */
+ if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+- gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
++ if (gds_mitigation == GDS_MITIGATION_FORCE) {
++ /*
++ * This only needs to be done on the boot CPU so do it
++ * here rather than in update_gds_msr()
++ */
++ setup_clear_cpu_cap(X86_FEATURE_AVX);
++ pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
++ } else {
++ gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
++ }
+ goto out;
+ }
+
++ /* Microcode has mitigation, use it */
++ if (gds_mitigation == GDS_MITIGATION_FORCE)
++ gds_mitigation = GDS_MITIGATION_FULL;
++
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ if (mcu_ctrl & GDS_MITG_LOCKED) {
+ if (gds_mitigation == GDS_MITIGATION_OFF)
+@@ -710,6 +726,8 @@ static int __init gds_parse_cmdline(char
+
+ if (!strcmp(str, "off"))
+ gds_mitigation = GDS_MITIGATION_OFF;
++ else if (!strcmp(str, "force"))
++ gds_mitigation = GDS_MITIGATION_FORCE;
+
+ return 0;
+ }
--- /dev/null
+From d63b3f0e819275ee64648eb01330aad3e347d9ba Mon Sep 17 00:00:00 2001
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Date: Wed, 12 Jul 2023 19:43:11 -0700
+Subject: x86/speculation: Add Gather Data Sampling mitigation
+
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+
+commit 8974eb588283b7d44a7c91fa09fcbaf380339f3a upstream
+
+Gather Data Sampling (GDS) is a hardware vulnerability which allows
+unprivileged speculative access to data which was previously stored in
+vector registers.
+
+Intel processors that support AVX2 and AVX512 have gather instructions
+that fetch non-contiguous data elements from memory. On vulnerable
+hardware, when a gather instruction is transiently executed and
+encounters a fault, stale data from architectural or internal vector
+registers may get transiently stored to the destination vector
+register, allowing an attacker to infer the stale data using typical
+side channel techniques like cache timing attacks.
+
+This mitigation is different from many earlier ones for two reasons.
+First, it is enabled by default and a bit must be set to *DISABLE* it.
+This is the opposite of normal mitigation polarity. This means GDS can
+be mitigated simply by updating microcode and leaving the new control
+bit alone.
+
+Second, GDS has a "lock" bit. This lock bit is there because the
+mitigation affects the hardware security features KeyLocker and SGX.
+It needs to be enabled and *STAY* enabled for these features to be
+mitigated against GDS.
+
+The mitigation is enabled in the microcode by default. Disable it by
+setting gather_data_sampling=off or by disabling all mitigations with
+mitigations=off. The mitigation status can be checked by reading:
+
+ /sys/devices/system/cpu/vulnerabilities/gather_data_sampling
+
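+In condensed form, the unusual polarity means the kernel *sets* a bit
+to turn the mitigation off (sketch of the update_gds_msr() added below):
+
+    u64 mcu_ctrl;
+
+    rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+    if (gds_mitigation == GDS_MITIGATION_OFF)
+        mcu_ctrl |= GDS_MITG_DIS;   /* set the bit to DISABLE */
+    else
+        mcu_ctrl &= ~GDS_MITG_DIS;  /* clear it to stay mitigated */
+    wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+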
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 11 -
+ Documentation/admin-guide/hw-vuln/gather_data_sampling.rst | 99 +++++++++
+ Documentation/admin-guide/hw-vuln/index.rst | 1
+ Documentation/admin-guide/kernel-parameters.txt | 33 ++-
+ arch/x86/include/asm/cpufeatures.h | 1
+ arch/x86/include/asm/msr-index.h | 11 +
+ arch/x86/kernel/cpu/bugs.c | 129 +++++++++++++
+ arch/x86/kernel/cpu/common.c | 34 ++-
+ arch/x86/kernel/cpu/cpu.h | 1
+ drivers/base/cpu.c | 8
+ 10 files changed, 305 insertions(+), 23 deletions(-)
+ create mode 100644 Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -480,16 +480,17 @@ Description: information about CPUs hete
+ cpu_capacity: capacity of cpu#.
+
+ What: /sys/devices/system/cpu/vulnerabilities
++ /sys/devices/system/cpu/vulnerabilities/gather_data_sampling
++ /sys/devices/system/cpu/vulnerabilities/itlb_multihit
++ /sys/devices/system/cpu/vulnerabilities/l1tf
++ /sys/devices/system/cpu/vulnerabilities/mds
+ /sys/devices/system/cpu/vulnerabilities/meltdown
++ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
++ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
+- /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+- /sys/devices/system/cpu/vulnerabilities/l1tf
+- /sys/devices/system/cpu/vulnerabilities/mds
+ /sys/devices/system/cpu/vulnerabilities/srbds
+ /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+- /sys/devices/system/cpu/vulnerabilities/itlb_multihit
+- /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
+@@ -0,0 +1,99 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++GDS - Gather Data Sampling
++==========================
++
++Gather Data Sampling is a hardware vulnerability which allows unprivileged
++speculative access to data which was previously stored in vector registers.
++
++Problem
++-------
++When a gather instruction performs loads from memory, different data elements
++are merged into the destination vector register. However, when a gather
++instruction that is transiently executed encounters a fault, stale data from
++architectural or internal vector registers may get transiently forwarded to the
++destination vector register instead. This will allow a malicious attacker to
++infer stale data using typical side channel techniques like cache timing
++attacks. GDS is a purely sampling-based attack.
++
++The attacker uses gather instructions to infer the stale vector register data.
++The victim does not need to do anything special other than use the vector
++registers. The victim does not need to use gather instructions to be
++vulnerable.
++
++Because the buffers are shared between Hyper-Threads cross Hyper-Thread attacks
++are possible.
++
++Attack scenarios
++----------------
++Without mitigation, GDS can infer stale data across virtually all
++permission boundaries:
++
++ Non-enclaves can infer SGX enclave data
++ Userspace can infer kernel data
++ Guests can infer data from hosts
++ Guests can infer data from other guests
++ Users can infer data from other users
++
++Because of this, it is important to ensure that the mitigation stays enabled in
++lower-privilege contexts like guests and when running outside SGX enclaves.
++
++The hardware enforces the mitigation for SGX. Likewise, VMMs should ensure
++that guests are not allowed to disable the GDS mitigation. If a host erred and
++allowed this, a guest could theoretically disable GDS mitigation, mount an
++attack, and re-enable it.
++
++Mitigation mechanism
++--------------------
++This issue is mitigated in microcode. The microcode defines the following new
++bits:
++
++ ================================ === ============================
++ IA32_ARCH_CAPABILITIES[GDS_CTRL] R/O Enumerates GDS vulnerability
++ and mitigation support.
++ IA32_ARCH_CAPABILITIES[GDS_NO] R/O Processor is not vulnerable.
++ IA32_MCU_OPT_CTRL[GDS_MITG_DIS] R/W Disables the mitigation
++ 0 by default.
++ IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] R/W Locks GDS_MITG_DIS=0. Writes
++ to GDS_MITG_DIS are ignored
++ Can't be cleared once set.
++ ================================ === ============================
++
++GDS can also be mitigated on systems that don't have updated microcode by
++disabling AVX. This can be done by setting "clearcpuid=avx" on the kernel
++command-line.
++
++Mitigation control on the kernel command line
++---------------------------------------------
++The mitigation can be disabled by setting "gather_data_sampling=off" or
++"mitigations=off" on the kernel command line. Not specifying either will
++default to the mitigation being enabled.
++
++GDS System Information
++------------------------
++The kernel provides vulnerability status information through sysfs. For
++GDS this can be accessed by the following sysfs file:
++
++/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
++
++The possible values contained in this file are:
++
++ ============================== =============================================
++ Not affected Processor not vulnerable.
++ Vulnerable Processor vulnerable and mitigation disabled.
++ Vulnerable: No microcode Processor vulnerable and microcode is missing
++ mitigation.
++ Mitigation: Microcode Processor is vulnerable and mitigation is in
++ effect.
++ Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in
++ effect and cannot be disabled.
++ Unknown: Dependent on
++ hypervisor status Running on a virtual guest processor that is
++ affected but with no way to know if host
++ processor is mitigated or vulnerable.
++ ============================== =============================================
++
++GDS Default mitigation
++----------------------
++The updated microcode will enable the mitigation by default. The kernel's
++default action is to leave the mitigation enabled.
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -16,3 +16,4 @@ are configurable at compile, boot or run
+ multihit.rst
+ special-register-buffer-data-sampling.rst
+ processor_mmio_stale_data.rst
++ gather_data_sampling.rst
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1336,6 +1336,20 @@
+ Format: off | on
+ default: on
+
++ gather_data_sampling=
++ [X86,INTEL] Control the Gather Data Sampling (GDS)
++ mitigation.
++
++ Gather Data Sampling is a hardware vulnerability which
++ allows unprivileged speculative access to data which was
++ previously stored in vector registers.
++
++ This issue is mitigated by default in updated microcode.
++ The mitigation may have a performance impact but can be
++ disabled.
++
++ off: Disable GDS mitigation.
++
+ gcov_persist= [GCOV] When non-zero (default), profiling data for
+ kernel modules is saved and remains accessible via
+ debugfs, even when the module is unloaded/reloaded.
+@@ -2696,21 +2710,22 @@
+ Disable all optional CPU mitigations. This
+ improves system performance, but it may also
+ expose users to several CPU vulnerabilities.
+- Equivalent to: nopti [X86,PPC]
++ Equivalent to: gather_data_sampling=off [X86]
+ kpti=0 [ARM64]
+- nospectre_v1 [X86,PPC]
++ kvm.nx_huge_pages=off [X86]
++ l1tf=off [X86]
++ mds=off [X86]
++ mmio_stale_data=off [X86]
++ no_entry_flush [PPC]
++ no_uaccess_flush [PPC]
+ nobp=0 [S390]
++ nopti [X86,PPC]
++ nospectre_v1 [X86,PPC]
+ nospectre_v2 [X86,PPC,S390,ARM64]
+- spectre_v2_user=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
++ spectre_v2_user=off [X86]
+ ssbd=force-off [ARM64]
+- l1tf=off [X86]
+- mds=off [X86]
+ tsx_async_abort=off [X86]
+- kvm.nx_huge_pages=off [X86]
+- no_entry_flush [PPC]
+- no_uaccess_flush [PPC]
+- mmio_stale_data=off [X86]
+
+ Exceptions:
+ This does not have any effect on
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -415,5 +415,6 @@
+ #define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
+ #define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+ #define X86_BUG_MMIO_UNKNOWN X86_BUG(28) /* CPU is too old and its MMIO Stale Data status is unknown */
++#define X86_BUG_GDS X86_BUG(29) /* CPU is affected by Gather Data Sampling */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -147,6 +147,15 @@
+ * Not susceptible to Post-Barrier
+ * Return Stack Buffer Predictions.
+ */
++#define ARCH_CAP_GDS_CTRL BIT(25) /*
++ * CPU is vulnerable to Gather
++ * Data Sampling (GDS) and
++ * has controls for mitigation.
++ */
++#define ARCH_CAP_GDS_NO BIT(26) /*
++ * CPU is not vulnerable to Gather
++ * Data Sampling (GDS).
++ */
+
+ #define MSR_IA32_FLUSH_CMD 0x0000010b
+ #define L1D_FLUSH BIT(0) /*
+@@ -165,6 +174,8 @@
+ #define MSR_IA32_MCU_OPT_CTRL 0x00000123
+ #define RNGDS_MITG_DIS BIT(0)
+ #define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
++#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */
++#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */
+
+ #define MSR_IA32_SYSENTER_CS 0x00000174
+ #define MSR_IA32_SYSENTER_ESP 0x00000175
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -44,6 +44,7 @@ static void __init md_clear_select_mitig
+ static void __init taa_select_mitigation(void);
+ static void __init mmio_select_mitigation(void);
+ static void __init srbds_select_mitigation(void);
++static void __init gds_select_mitigation(void);
+
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -149,6 +150,7 @@ void __init cpu_select_mitigations(void)
+ l1tf_select_mitigation();
+ md_clear_select_mitigation();
+ srbds_select_mitigation();
++ gds_select_mitigation();
+ }
+
+ /*
+@@ -600,6 +602,120 @@ static int __init srbds_parse_cmdline(ch
+ early_param("srbds", srbds_parse_cmdline);
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "GDS: " fmt
++
++enum gds_mitigations {
++ GDS_MITIGATION_OFF,
++ GDS_MITIGATION_UCODE_NEEDED,
++ GDS_MITIGATION_FULL,
++ GDS_MITIGATION_FULL_LOCKED,
++ GDS_MITIGATION_HYPERVISOR,
++};
++
++static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
++
++static const char * const gds_strings[] = {
++ [GDS_MITIGATION_OFF] = "Vulnerable",
++ [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
++ [GDS_MITIGATION_FULL] = "Mitigation: Microcode",
++ [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
++ [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
++};
++
++void update_gds_msr(void)
++{
++ u64 mcu_ctrl_after;
++ u64 mcu_ctrl;
++
++ switch (gds_mitigation) {
++ case GDS_MITIGATION_OFF:
++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++ mcu_ctrl |= GDS_MITG_DIS;
++ break;
++ case GDS_MITIGATION_FULL_LOCKED:
++ /*
++ * The LOCKED state comes from the boot CPU. APs might not have
++ * the same state. Make sure the mitigation is enabled on all
++ * CPUs.
++ */
++ case GDS_MITIGATION_FULL:
++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++ mcu_ctrl &= ~GDS_MITG_DIS;
++ break;
++ case GDS_MITIGATION_UCODE_NEEDED:
++ case GDS_MITIGATION_HYPERVISOR:
++ return;
++ };
++
++ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++
++ /*
++ * Check to make sure that the WRMSR value was not ignored. Writes to
++ * GDS_MITG_DIS will be ignored if this processor is locked but the boot
++ * processor was not.
++ */
++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
++ WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
++}
++
++static void __init gds_select_mitigation(void)
++{
++ u64 mcu_ctrl;
++
++ if (!boot_cpu_has_bug(X86_BUG_GDS))
++ return;
++
++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
++ gds_mitigation = GDS_MITIGATION_HYPERVISOR;
++ goto out;
++ }
++
++ if (cpu_mitigations_off())
++ gds_mitigation = GDS_MITIGATION_OFF;
++ /* Will verify below that mitigation _can_ be disabled */
++
++ /* No microcode */
++ if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
++ gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
++ goto out;
++ }
++
++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++ if (mcu_ctrl & GDS_MITG_LOCKED) {
++ if (gds_mitigation == GDS_MITIGATION_OFF)
++ pr_warn("Mitigation locked. Disable failed.\n");
++
++ /*
++ * The mitigation is selected from the boot CPU. All other CPUs
++ * _should_ have the same state. If the boot CPU isn't locked
++ * but others are then update_gds_msr() will WARN() of the state
++ * mismatch. If the boot CPU is locked update_gds_msr() will
++ * ensure the other CPUs have the mitigation enabled.
++ */
++ gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
++ }
++
++ update_gds_msr();
++out:
++ pr_info("%s\n", gds_strings[gds_mitigation]);
++}
++
++static int __init gds_parse_cmdline(char *str)
++{
++ if (!str)
++ return -EINVAL;
++
++ if (!boot_cpu_has_bug(X86_BUG_GDS))
++ return 0;
++
++ if (!strcmp(str, "off"))
++ gds_mitigation = GDS_MITIGATION_OFF;
++
++ return 0;
++}
++early_param("gather_data_sampling", gds_parse_cmdline);
++
++#undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V1 : " fmt
+
+ enum spectre_v1_mitigation {
+@@ -2158,6 +2274,11 @@ static ssize_t retbleed_show_state(char
+ return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+ }
+
++static ssize_t gds_show_state(char *buf)
++{
++ return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
+@@ -2207,6 +2328,9 @@ static ssize_t cpu_show_common(struct de
+ case X86_BUG_RETBLEED:
+ return retbleed_show_state(buf);
+
++ case X86_BUG_GDS:
++ return gds_show_state(buf);
++
+ default:
+ break;
+ }
+@@ -2271,4 +2395,9 @@ ssize_t cpu_show_retbleed(struct device
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
+ }
++
++ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
++}
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1129,6 +1129,8 @@ static const __initconst struct x86_cpu_
+ #define MMIO_SBDS BIT(2)
+ /* CPU is affected by RETbleed, speculating where you would not expect it */
+ #define RETBLEED BIT(3)
++/* CPU is affected by GDS */
++#define GDS BIT(4)
+
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
+@@ -1141,19 +1143,21 @@ static const struct x86_cpu_id cpu_vuln_
+ VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
+ VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
+- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED),
++ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
+- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
+- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS),
++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
+- VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
+- VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO),
+- VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO),
+- VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
++ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
++ VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
++ VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS),
++ VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
+- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
++ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
++ VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS),
++ VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS),
+ VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
+- VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED),
++ VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO),
+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
+@@ -1279,6 +1283,16 @@ static void __init cpu_set_bug_bits(stru
+ !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+
++ /*
++ * Check if CPU is vulnerable to GDS. If running in a virtual machine on
++ * an affected processor, the VMM may have disabled the use of GATHER by
++ * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
++ * which means that AVX will be disabled.
++ */
++ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
++ boot_cpu_has(X86_FEATURE_AVX))
++ setup_force_cpu_bug(X86_BUG_GDS);
++
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ return;
+
+@@ -1755,6 +1769,8 @@ void identify_secondary_cpu(struct cpuin
+ validate_apic_and_package_id(c);
+ x86_spec_ctrl_setup_ap();
+ update_srbds_msr();
++ if (boot_cpu_has_bug(X86_BUG_GDS))
++ update_gds_msr();
+ }
+
+ static __init int setup_noclflush(char *arg)
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -80,6 +80,7 @@ void cpu_select_mitigations(void);
+
+ extern void x86_spec_ctrl_setup_ap(void);
+ extern void update_srbds_msr(void);
++extern void update_gds_msr(void);
+
+ extern u64 x86_read_arch_cap_msr(void);
+
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -581,6 +581,12 @@ ssize_t __weak cpu_show_retbleed(struct
+ return sysfs_emit(buf, "Not affected\n");
+ }
+
++ssize_t __weak cpu_show_gds(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+@@ -592,6 +598,7 @@ static DEVICE_ATTR(itlb_multihit, 0444,
+ static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
++static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+@@ -605,6 +612,7 @@ static struct attribute *cpu_root_vulner
+ &dev_attr_srbds.attr,
+ &dev_attr_mmio_stale_data.attr,
+ &dev_attr_retbleed.attr,
++ &dev_attr_gather_data_sampling.attr,
+ NULL
+ };
+
--- /dev/null
+From dc9710d3e8c3a26fbd764f4bd733814c9464bf31 Mon Sep 17 00:00:00 2001
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Date: Wed, 12 Jul 2023 19:43:13 -0700
+Subject: x86/speculation: Add Kconfig option for GDS
+
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+
+commit 53cf5797f114ba2bd86d23a862302119848eff19 upstream
+
+Gather Data Sampling (GDS) is mitigated in microcode. However, on
+systems that haven't received the updated microcode, disabling AVX
+can act as a mitigation. Add a Kconfig option that uses the microcode
+mitigation if available and disables AVX otherwise. Setting this
+option has no effect on systems not affected by GDS. This is the
+equivalent of setting gather_data_sampling=force.
+
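+Side by side, the two equivalent knobs (illustrative):
+
+    CONFIG_GDS_FORCE_MITIGATION=y    # build-time default
+    gather_data_sampling=force       # boot-time equivalent
+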
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig | 19 +++++++++++++++++++
+ arch/x86/kernel/cpu/bugs.c | 4 ++++
+ 2 files changed, 23 insertions(+)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2501,6 +2501,25 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+ def_bool y
+ depends on X86_64 || X86_PAE
+
++config GDS_FORCE_MITIGATION
++ bool "Force GDS Mitigation"
++ depends on CPU_SUP_INTEL
++ default n
++ help
++ Gather Data Sampling (GDS) is a hardware vulnerability which allows
++ unprivileged speculative access to data which was previously stored in
++ vector registers.
++
++ This option is equivalent to setting gather_data_sampling=force on the
++ command line. The microcode mitigation is used if present, otherwise
++ AVX is disabled as a mitigation. On affected systems that are missing
++ the microcode, any userspace code that unconditionally uses AVX will
++ break with this option set.
++
++ Setting this option on systems not vulnerable to GDS has no effect.
++
++ If in doubt, say N.
++
+ config ARCH_ENABLE_HUGEPAGE_MIGRATION
+ def_bool y
+ depends on X86_64 && HUGETLB_PAGE && MIGRATION
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -613,7 +613,11 @@ enum gds_mitigations {
+ GDS_MITIGATION_HYPERVISOR,
+ };
+
++#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION)
++static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
++#else
+ static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
++#endif
+
+ static const char * const gds_strings[] = {
+ [GDS_MITIGATION_OFF] = "Vulnerable",
--- /dev/null
+From fe3e0a13e597c1c8617814bf9b42ab732db5c26e Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 3 Jul 2023 15:00:32 +0200
+Subject: x86/xen: Fix secondary processors' FPU initialization
+
+From: Juergen Gross <jgross@suse.com>
+
+commit fe3e0a13e597c1c8617814bf9b42ab732db5c26e upstream.
+
+Moving the call to fpu__init_cpu() from cpu_init() to start_secondary()
+broke Xen PV guests, as those don't call start_secondary() for APs.
+
+Call fpu__init_cpu() in Xen's cpu_bringup(), which is the Xen PV
+replacement of start_secondary().
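+
+A simplified sketch of the two AP entry paths after commit
+b81fac906a8f (bodies are illustrative, not the actual kernel code):
+
+  /* Bare-metal APs: the per-CPU FPU init moved out of cpu_init()
+   * and into this path. */
+  static void start_secondary(void)
+  {
+          cpu_init();             /* no longer calls fpu__init_cpu() */
+          fpu__init_cpu();
+          /* ... remaining bring-up ... */
+  }
+
+  /* Xen PV APs never run start_secondary(); they enter through
+   * cpu_bringup() instead, so the FPU init must be mirrored here. */
+  static void cpu_bringup(void)
+  {
+          cpu_init();
+          fpu__init_cpu();        /* this fix */
+          /* ... remaining bring-up ... */
+  }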
+
+Fixes: b81fac906a8f ("x86/fpu: Move FPU initialization into arch_cpu_finalize_init()")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20230703130032.22916-1-jgross@suse.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/xen/smp_pv.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/xen/smp_pv.c
++++ b/arch/x86/xen/smp_pv.c
+@@ -28,6 +28,7 @@
+ #include <asm/desc.h>
+ #include <asm/pgtable.h>
+ #include <asm/cpu.h>
++#include <asm/fpu/internal.h>
+
+ #include <xen/interface/xen.h>
+ #include <xen/interface/vcpu.h>
+@@ -61,6 +62,7 @@ static void cpu_bringup(void)
+
+ cr4_init();
+ cpu_init();
++ fpu__init_cpu();
+ touch_softlockup_watchdog();
+ preempt_disable();
+
--- /dev/null
+From 534fc31d09b706a16d83533e16b5dc855caf7576 Mon Sep 17 00:00:00 2001
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+Date: Thu, 3 Aug 2023 08:41:22 +0200
+Subject: xen/netback: Fix buffer overrun triggered by unusual packet
+
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+
+commit 534fc31d09b706a16d83533e16b5dc855caf7576 upstream.
+
+It is possible for a guest to send a packet that contains a head + 18
+slots and yet has a len <= XEN_NETBACK_TX_COPY_LEN. This makes nr_slots
+underflow in xenvif_get_requests(), which in turn makes the subsequent
+loop's termination condition wrong and leads to a buffer overrun of
+queue->tx_map_ops.
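+
+nr_slots is an unsigned quantity, so the underflow is a wraparound; an
+isolated illustration (values are made up) of why the loop bound then
+stops terminating the loop:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int nr_slots = 18;     /* slots accounted for */
+          unsigned int consumed = 19;     /* slots actually used up */
+
+          nr_slots -= consumed;           /* wraps around to UINT_MAX */
+          printf("nr_slots = %u\n", nr_slots);    /* 4294967295 */
+
+          /* A condition like 'shinfo->nr_frags < nr_slots' is now
+           * effectively always true, so the loop filling
+           * queue->tx_map_ops writes far past the end of the array. */
+          return 0;
+  }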
+
+Rework the code to account for the extra frag_overflow slots.
+
+This is CVE-2023-34319 / XSA-432.
+
+Fixes: ad7f402ae4f4 ("xen/netback: Ensure protocol headers don't fall in the non-linear area")
+Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Reviewed-by: Paul Durrant <paul@xen.org>
+Reviewed-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/netback.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -389,7 +389,7 @@ static void xenvif_get_requests(struct x
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+ struct xen_netif_tx_request *txp = first;
+
+- nr_slots = shinfo->nr_frags + 1;
++ nr_slots = shinfo->nr_frags + frag_overflow + 1;
+
+ copy_count(skb) = 0;
+ XENVIF_TX_CB(skb)->split_mask = 0;
+@@ -455,8 +455,8 @@ static void xenvif_get_requests(struct x
+ }
+ }
+
+- for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+- shinfo->nr_frags++, gop++) {
++ for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
++ shinfo->nr_frags++, gop++, nr_slots--) {
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp,
+@@ -469,12 +469,12 @@ static void xenvif_get_requests(struct x
+ txp++;
+ }
+
+- if (frag_overflow) {
++ if (nr_slots > 0) {
+
+ shinfo = skb_shinfo(nskb);
+ frags = shinfo->frags;
+
+- for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
++ for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+ shinfo->nr_frags++, txp++, gop++) {
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+@@ -485,6 +485,11 @@ static void xenvif_get_requests(struct x
+ }
+
+ skb_shinfo(skb)->frag_list = nskb;
++ } else if (nskb) {
++ /* A frag_list skb was allocated but it is no longer needed
++ * because enough slots were converted to copy ops above.
++ */
++ kfree_skb(nskb);
+ }
+
+ (*copy_ops) = cop - queue->tx_copy_ops;