kvm-x86-nsvm-harden-svm_free_nested-against-freeing-vmcb02-while-still-in-use.patch
kvm-x86-add-kvm_leave_nested.patch
kvm-x86-remove-exit_int_info-warning-in-svm_handle_exit.patch
+x86-tsx-add-a-feature-bit-for-tsx-control-msr-support.patch
+x86-pm-add-enumeration-check-before-spec-msrs-save-restore-setup.patch
+x86-ioremap-fix-page-aligned-size-calculation-in-__ioremap_caller.patch
--- /dev/null
+From 4dbd6a3e90e03130973688fd79e19425f720d999 Mon Sep 17 00:00:00 2001
+From: Michael Kelley <mikelley@microsoft.com>
+Date: Wed, 16 Nov 2022 10:41:24 -0800
+Subject: x86/ioremap: Fix page aligned size calculation in __ioremap_caller()
+
+From: Michael Kelley <mikelley@microsoft.com>
+
+commit 4dbd6a3e90e03130973688fd79e19425f720d999 upstream.
+
+Current code re-calculates the size after aligning the starting and
+ending physical addresses on a page boundary. But the re-calculation
+also embeds the masking of high order bits that exceed the size of
+the physical address space (via PHYSICAL_PAGE_MASK). If the masking
+removes any high order bits, the size calculation results in a huge
+value, and the subsequent mapping attempt is likely to fail immediately.
+
+Fix this by re-calculating the page-aligned size first. Then mask any
+high order bits using PHYSICAL_PAGE_MASK.
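+
+As a rough illustration (hypothetical numbers, not taken from a real
+platform), assume PHYSICAL_PAGE_MASK keeps only address bits 0-45 and a
+caller passes an address with a memory encryption bit 46 set:
+
+        phys_addr = (1ULL << 46) | 0x1000;      /* encryption bit set */
+        last_addr = phys_addr + 0x2000 - 1;     /* caller wants 8 KB  */
+
+        phys_addr &= PHYSICAL_PAGE_MASK;        /* bit 46 stripped    */
+        size = PAGE_ALIGN(last_addr + 1) - phys_addr;   /* ~64 TB     */
+
+Computing the page-aligned size first and applying PHYSICAL_PAGE_MASK
+only afterwards yields the expected 8 KB.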
+
+Fixes: ffa71f33a820 ("x86, ioremap: Fix incorrect physical address handling in PAE mode")
+Signed-off-by: Michael Kelley <mikelley@microsoft.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/1668624097-14884-2-git-send-email-mikelley@microsoft.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/ioremap.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -216,9 +216,15 @@ __ioremap_caller(resource_size_t phys_ad
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+- phys_addr &= PHYSICAL_PAGE_MASK;
++ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
++ /*
++ * Mask out any bits not part of the actual physical
++ * address, like memory encryption bits.
++ */
++ phys_addr &= PHYSICAL_PAGE_MASK;
++
+ retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
+ pcm, &new_pcm);
+ if (retval) {
--- /dev/null
+From 50bcceb7724e471d9b591803889df45dcbb584bc Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Tue, 15 Nov 2022 11:17:06 -0800
+Subject: x86/pm: Add enumeration check before spec MSRs save/restore setup
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 50bcceb7724e471d9b591803889df45dcbb584bc upstream.
+
+pm_save_spec_msr() keeps a list of all the MSRs which _might_ need
+to be saved and restored at hibernate and resume. However, it has
+zero awareness of CPU support for these MSRs. It mostly works by
+unconditionally attempting to manipulate these MSRs and relying on
+rdmsrl_safe() being able to handle a #GP on CPUs where the support is
+unavailable.
+
+However, it's possible for reads (RDMSR) to be supported for a given MSR
+while writes (WRMSR) are not. In this case, msr_build_context() sees
+a successful read (RDMSR) and marks the MSR as valid. Then, later, a
+write (WRMSR) of that MSR by restore_processor_state() fails, because
+writing it is not allowed on the Intel Atom N2600, and produces a nasty
+(but harmless) error message:
+
+ unchecked MSR access error: WRMSR to 0x122 (tried to write 0x0000000000000002) \
+ at rIP: 0xffffffff8b07a574 (native_write_msr+0x4/0x20)
+ Call Trace:
+ <TASK>
+ restore_processor_state
+ x86_acpi_suspend_lowlevel
+ acpi_suspend_enter
+ suspend_devices_and_enter
+ pm_suspend.cold
+ state_store
+ kernfs_fop_write_iter
+ vfs_write
+ ksys_write
+ do_syscall_64
+ ? do_syscall_64
+ ? up_read
+ ? lock_is_held_type
+ ? asm_exc_page_fault
+ ? lockdep_hardirqs_on
+ entry_SYSCALL_64_after_hwframe
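+
+The underlying problem is that the probe in msr_build_context() is
+read-only. Roughly (simplified sketch, not verbatim kernel code):
+
+        u64 val;
+
+        /* A successful RDMSR marks the MSR as valid; nothing checks
+         * that the WRMSR issued on resume will also be accepted.
+         */
+        msr_array[i].valid = !rdmsrl_safe(msr_id[i], &val);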
+
+To fix this, add the corresponding X86_FEATURE bit for each MSR. Avoid
+trying to manipulate the MSR when the feature bit is clear. This
+required adding an X86_FEATURE bit for MSRs that do not have one already,
+but it's a small price to pay.
+
+ [ bp: Move struct msr_enumeration inside the only function that uses it. ]
+
+Fixes: 73924ec4d560 ("x86/pm: Save the MSR validity status at context setup")
+Reported-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/c24db75d69df6e66c0465e13676ad3f2837a2ed8.1668539735.git.pawan.kumar.gupta@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/power/cpu.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -513,16 +513,23 @@ static int pm_cpu_check(const struct x86
+
+ static void pm_save_spec_msr(void)
+ {
+- u32 spec_msr_id[] = {
+- MSR_IA32_SPEC_CTRL,
+- MSR_IA32_TSX_CTRL,
+- MSR_TSX_FORCE_ABORT,
+- MSR_IA32_MCU_OPT_CTRL,
+- MSR_AMD64_LS_CFG,
+- MSR_AMD64_DE_CFG,
++ struct msr_enumeration {
++ u32 msr_no;
++ u32 feature;
++ } msr_enum[] = {
++ { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
++ { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
++ { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
++ { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
++ { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
++ { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
+ };
++ int i;
+
+- msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
++ for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
++ if (boot_cpu_has(msr_enum[i].feature))
++ msr_build_context(&msr_enum[i].msr_no, 1);
++ }
+ }
+
+ static int pm_check_save_msr(void)
--- /dev/null
+From aaa65d17eec372c6a9756833f3964ba05b05ea14 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Tue, 15 Nov 2022 11:17:05 -0800
+Subject: x86/tsx: Add a feature bit for TSX control MSR support
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit aaa65d17eec372c6a9756833f3964ba05b05ea14 upstream.
+
+Support for the TSX control MSR is enumerated in MSR_IA32_ARCH_CAPABILITIES.
+This is different from how other CPU features are enumerated, i.e. via
+CPUID. Currently, a call to tsx_ctrl_is_supported() is required for
+enumerating the feature. In the absence of a feature bit for TSX control,
+any code that relies on checking feature bits directly will not work.
+
+In preparation for adding a feature bit check in MSR save/restore
+during suspend/resume, set a new feature bit X86_FEATURE_MSR_TSX_CTRL when
+MSR_IA32_TSX_CTRL is present. Also make tsx_ctrl_is_supported() use the
+new feature bit to avoid any overhead of reading the MSR.
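+
+With the feature bit in place, callers can use an ordinary feature test
+instead of re-reading MSR_IA32_ARCH_CAPABILITIES, e.g. (illustrative
+only):
+
+        u64 tsx;
+
+        if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL))
+                rdmsrl(MSR_IA32_TSX_CTRL, tsx);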
+
+ [ bp: Remove tsx_ctrl_is_supported(), add room for two more feature
+ bits in word 11 which are coming up in the next merge window. ]
+
+Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/de619764e1d98afbb7a5fa58424f1278ede37b45.1668539735.git.pawan.kumar.gupta@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 3 ++
+ arch/x86/kernel/cpu/tsx.c | 38 ++++++++++++++++---------------------
+ 2 files changed, 20 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -303,6 +303,9 @@
+ #define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
+ #define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
+
++
++#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
++
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
+ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+--- a/arch/x86/kernel/cpu/tsx.c
++++ b/arch/x86/kernel/cpu/tsx.c
+@@ -58,24 +58,6 @@ static void tsx_enable(void)
+ wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+ }
+
+-static bool tsx_ctrl_is_supported(void)
+-{
+- u64 ia32_cap = x86_read_arch_cap_msr();
+-
+- /*
+- * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+- * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+- *
+- * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+- * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+- * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+- * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+- * tsx= cmdline requests will do nothing on CPUs without
+- * MSR_IA32_TSX_CTRL support.
+- */
+- return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
+-}
+-
+ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+ {
+ if (boot_cpu_has_bug(X86_BUG_TAA))
+@@ -135,7 +117,7 @@ static void tsx_clear_cpuid(void)
+ rdmsrl(MSR_TSX_FORCE_ABORT, msr);
+ msr |= MSR_TFA_TSX_CPUID_CLEAR;
+ wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+- } else if (tsx_ctrl_is_supported()) {
++ } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) {
+ rdmsrl(MSR_IA32_TSX_CTRL, msr);
+ msr |= TSX_CTRL_CPUID_CLEAR;
+ wrmsrl(MSR_IA32_TSX_CTRL, msr);
+@@ -158,7 +140,8 @@ static void tsx_dev_mode_disable(void)
+ u64 mcu_opt_ctrl;
+
+ /* Check if RTM_ALLOW exists */
+- if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
++ if (!boot_cpu_has_bug(X86_BUG_TAA) ||
++ !cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL) ||
+ !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
+ return;
+
+@@ -191,7 +174,20 @@ void __init tsx_init(void)
+ return;
+ }
+
+- if (!tsx_ctrl_is_supported()) {
++ /*
++ * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
++ * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
++ *
++ * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
++ * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
++ * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
++ * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
++ * tsx= cmdline requests will do nothing on CPUs without
++ * MSR_IA32_TSX_CTRL support.
++ */
++ if (x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR) {
++ setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
++ } else {
+ tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED;
+ return;
+ }