--- /dev/null
+From 250552b925ce400c17d166422fde9bb215958481 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Mon, 29 Nov 2021 10:47:01 +0100
+Subject: KVM: nVMX: Don't use Enlightened MSR Bitmap for L3
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit 250552b925ce400c17d166422fde9bb215958481 upstream.
+
+When KVM runs as a nested hypervisor on top of Hyper-V it uses Enlightened
+VMCS and enables the Enlightened MSR Bitmap feature for its L1s and L2s
+(which are actually L2s and L3s from Hyper-V's perspective). When the MSR
+bitmap is updated, KVM has to clear HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP
+from the clean fields to make Hyper-V aware of the change. For KVM's L1s,
+this is done in vmx_disable_intercept_for_msr()/vmx_enable_intercept_for_msr().
+The MSR bitmap for L2 is built in nested_vmx_prepare_msr_bitmap() by blending
+the MSR bitmap for L1 with L1's idea of the MSR bitmap for L2. KVM, however,
+doesn't check whether the resulting bitmap is different and never clears
+HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP in eVMCS02. This is incorrect and
+may result in Hyper-V missing the update.
+
+The issue could have been solved by calling evmcs_touch_msr_bitmap() for
+eVMCS02 from nested_vmx_prepare_msr_bitmap() unconditionally, but doing so
+would not give any performance benefit compared to not using the Enlightened
+MSR Bitmap at all. Three-level nesting is also not a very common setup
+nowadays.
+
+Don't enable the 'Enlightened MSR Bitmap' feature for KVM's L2s (real L3s)
+for now.
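+
+For reference, the clean-field reset that the L1 paths rely on (and that
+eVMCS02 never receives) is the evmcs_touch_msr_bitmap() helper, reproduced
+here as it exists in evmcs.h before the later patches in this series:
+
+  static inline void evmcs_touch_msr_bitmap(void)
+  {
+          if (unlikely(!current_evmcs))
+                  return;
+
+          if (current_evmcs->hv_enlightenments_control.msr_bitmap)
+                  current_evmcs->hv_clean_fields &=
+                          ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
+  }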
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20211129094704.326635-2-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Alexandru Matei <alexandru.matei@uipath.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2725,15 +2725,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs
+ if (!loaded_vmcs->msr_bitmap)
+ goto out_vmcs;
+ memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
+-
+- if (IS_ENABLED(CONFIG_HYPERV) &&
+- static_branch_unlikely(&enable_evmcs) &&
+- (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
+- struct hv_enlightened_vmcs *evmcs =
+- (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
+-
+- evmcs->hv_enlightenments_control.msr_bitmap = 1;
+- }
+ }
+
+ memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
+@@ -7029,6 +7020,19 @@ static int vmx_create_vcpu(struct kvm_vc
+ if (err < 0)
+ goto free_pml;
+
++ /*
++ * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
++ * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
++ * feature only for vmcs01, KVM currently isn't equipped to realize any
++ * performance benefits from enabling it for vmcs02.
++ */
++ if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
++ (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
++ struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
++
++ evmcs->hv_enlightenments_control.msr_bitmap = 1;
++ }
++
+ /* The MSR bitmap starts with all ones */
+ bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
+ bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
--- /dev/null
+From 93827a0a36396f2fd6368a54a020f420c8916e9b Mon Sep 17 00:00:00 2001
+From: Alexandru Matei <alexandru.matei@uipath.com>
+Date: Tue, 24 Jan 2023 00:12:08 +0200
+Subject: KVM: VMX: Fix crash due to uninitialized current_vmcs
+
+From: Alexandru Matei <alexandru.matei@uipath.com>
+
+commit 93827a0a36396f2fd6368a54a020f420c8916e9b upstream.
+
+KVM enables 'Enlightened VMCS' and 'Enlightened MSR Bitmap' when running as
+a nested hypervisor on top of Hyper-V. When the MSR bitmap is updated, the
+evmcs_touch_msr_bitmap() function uses the current_vmcs per-CPU variable to
+mark that the MSR bitmap was changed.
+
+vmx_vcpu_create() modifies the MSR bitmap via vmx_disable_intercept_for_msr()
+-> vmx_msr_bitmap_l01_changed(), which in the end calls this function. The
+function checks whether current_vmcs is NULL, but the check is insufficient
+because current_vmcs is not initialized at this point. Because of this, the
+code might incorrectly write to the structure pointed to by a current_vmcs
+value left behind by another task. Preemption is not disabled, so the current
+task can be preempted and moved to another CPU while current_vmcs is accessed
+multiple times from evmcs_touch_msr_bitmap(), which leads to a crash.
+
+The manipulation of MSR bitmaps by callers happens only for vmcs01, so the
+solution is to use vmx->vmcs01.vmcs instead of current_vmcs.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000338
+ PGD 4e1775067 P4D 0
+ Oops: 0002 [#1] PREEMPT SMP NOPTI
+ ...
+ RIP: 0010:vmx_msr_bitmap_l01_changed+0x39/0x50 [kvm_intel]
+ ...
+ Call Trace:
+ vmx_disable_intercept_for_msr+0x36/0x260 [kvm_intel]
+ vmx_vcpu_create+0xe6/0x540 [kvm_intel]
+ kvm_arch_vcpu_create+0x1d1/0x2e0 [kvm]
+ kvm_vm_ioctl_create_vcpu+0x178/0x430 [kvm]
+ kvm_vm_ioctl+0x53f/0x790 [kvm]
+ __x64_sys_ioctl+0x8a/0xc0
+ do_syscall_64+0x5c/0x90
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
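+
+For context, the trace above is reached from plain vCPU creation. A minimal
+userspace sketch of the sequence that exercises this path (standard KVM
+ioctls; the crash itself only matters on a host where KVM runs nested on
+Hyper-V with eVMCS enabled) could look like:
+
+  #include <fcntl.h>
+  #include <sys/ioctl.h>
+  #include <linux/kvm.h>
+
+  int main(void)
+  {
+          int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
+          /* KVM_CREATE_VM returns a VM fd; 0 selects the default machine type. */
+          int vm = ioctl(kvm, KVM_CREATE_VM, 0UL);
+          /* KVM_CREATE_VCPU reaches kvm_vm_ioctl_create_vcpu() and, for VMX,
+           * vmx_vcpu_create(), the path shown in the call trace. */
+          int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0UL);
+          return vcpu < 0;
+  }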
+
+Fixes: ceef7d10dfb6 ("KVM: x86: VMX: hyper-v: Enlightened MSR-Bitmap support")
+Cc: stable@vger.kernel.org
+Suggested-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Alexandru Matei <alexandru.matei@uipath.com>
+Link: https://lore.kernel.org/r/20230123221208.4964-1-alexandru.matei@uipath.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+[manual backport: evmcs.h was renamed to hyperv.h in a later
+version; the change is applied in evmcs.h instead]
+Signed-off-by: Alexandru Matei <alexandru.matei@uipath.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/evmcs.h | 11 -----------
+ arch/x86/kvm/vmx/vmx.c | 9 +++++++--
+ 2 files changed, 7 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kvm/vmx/evmcs.h
++++ b/arch/x86/kvm/vmx/evmcs.h
+@@ -166,16 +166,6 @@ static inline u16 evmcs_read16(unsigned
+ return *(u16 *)((char *)current_evmcs + offset);
+ }
+
+-static inline void evmcs_touch_msr_bitmap(void)
+-{
+- if (unlikely(!current_evmcs))
+- return;
+-
+- if (current_evmcs->hv_enlightenments_control.msr_bitmap)
+- current_evmcs->hv_clean_fields &=
+- ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
+-}
+-
+ static inline void evmcs_load(u64 phys_addr)
+ {
+ struct hv_vp_assist_page *vp_ap =
+@@ -196,7 +186,6 @@ static inline u64 evmcs_read64(unsigned
+ static inline u32 evmcs_read32(unsigned long field) { return 0; }
+ static inline u16 evmcs_read16(unsigned long field) { return 0; }
+ static inline void evmcs_load(u64 phys_addr) {}
+-static inline void evmcs_touch_msr_bitmap(void) {}
+ #endif /* IS_ENABLED(CONFIG_HYPERV) */
+
+ enum nested_evmptrld_status {
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3792,8 +3792,13 @@ static void vmx_msr_bitmap_l01_changed(s
+ * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
+ * bitmap has changed.
+ */
+- if (static_branch_unlikely(&enable_evmcs))
+- evmcs_touch_msr_bitmap();
++ if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs)) {
++ struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
++
++ if (evmcs->hv_enlightenments_control.msr_bitmap)
++ evmcs->hv_clean_fields &=
++ ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
++ }
+ }
+
+ static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
--- /dev/null
+From b84155c38076b36d625043a06a2f1c90bde62903 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Mon, 29 Nov 2021 10:47:02 +0100
+Subject: KVM: VMX: Introduce vmx_msr_bitmap_l01_changed() helper
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit b84155c38076b36d625043a06a2f1c90bde62903 upstream.
+
+In preparation for enabling the 'Enlightened MSR Bitmap' feature for Hyper-V
+guests, move MSR bitmap update tracking to a dedicated helper.
+
+Note: vmx_msr_bitmap_l01_changed() is called whenever the MSR bitmap might be
+updated. KVM doesn't check whether the bit it's trying to set is already set
+(or the bit it's trying to clear is already cleared). Such situations should
+not be common, and a few false positives should not be a problem.
+
+No functional change intended.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20211129094704.326635-3-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3785,6 +3785,17 @@ static void vmx_set_msr_bitmap_write(ulo
+ __set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
+ }
+
++static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
++{
++ /*
++ * When KVM is a nested hypervisor on top of Hyper-V and uses
++ * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
++ * bitmap has changed.
++ */
++ if (static_branch_unlikely(&enable_evmcs))
++ evmcs_touch_msr_bitmap();
++}
++
+ static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type)
+ {
+@@ -3794,8 +3805,7 @@ static __always_inline void vmx_disable_
+ if (!cpu_has_vmx_msr_bitmap())
+ return;
+
+- if (static_branch_unlikely(&enable_evmcs))
+- evmcs_touch_msr_bitmap();
++ vmx_msr_bitmap_l01_changed(vmx);
+
+ /*
+ * Mark the desired intercept state in shadow bitmap, this is needed
+@@ -3840,8 +3850,7 @@ static __always_inline void vmx_enable_i
+ if (!cpu_has_vmx_msr_bitmap())
+ return;
+
+- if (static_branch_unlikely(&enable_evmcs))
+- evmcs_touch_msr_bitmap();
++ vmx_msr_bitmap_l01_changed(vmx);
+
+ /*
+ * Mark the desired intercept state in shadow bitmap, this is needed
s390-define-runtime_discard_exit-to-fix-link-error-with-gnu-ld-2.36.patch
sh-define-runtime_discard_exit.patch
uml-define-runtime_discard_exit.patch
+kvm-nvmx-don-t-use-enlightened-msr-bitmap-for-l3.patch
+kvm-vmx-introduce-vmx_msr_bitmap_l01_changed-helper.patch
+kvm-vmx-fix-crash-due-to-uninitialized-current_vmcs.patch