5.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Wed, 12 May 2021 10:51:22 +0000 (12:51 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Wed, 12 May 2021 10:51:22 +0000 (12:51 +0200)
added patches:
kvm-arm-arm64-fix-kvm_vgic_v3_addr_type_redist-read.patch
kvm-arm64-fix-kvm_vgic_v3_addr_type_redist_region-read.patch
kvm-arm64-fully-zero-the-vcpu-state-on-reset.patch
kvm-destroy-i-o-bus-devices-on-unregister-failure-_after_-sync-ing-srcu.patch
kvm-nsvm-set-the-shadow-root-level-to-the-tdp-level-for-nested-npt.patch
kvm-nvmx-defer-the-mmu-reload-to-the-normal-path-on-an-eptp-switch.patch
kvm-nvmx-truncate-base-index-gpr-value-on-address-calc-in-64-bit.patch
kvm-nvmx-truncate-bits-63-32-of-vmcs-field-on-nested-check-in-64-bit.patch
kvm-stop-looking-for-coalesced-mmio-zones-if-the-bus-is-destroyed.patch
kvm-svm-do-not-allow-sev-sev-es-initialization-after-vcpus-are-created.patch
kvm-svm-don-t-strip-the-c-bit-from-cr2-on-pf-interception.patch
kvm-svm-inject-gp-on-guest-msr_tsc_aux-accesses-if-rdtscp-unsupported.patch
kvm-x86-mmu-alloc-page-for-pdptes-when-shadowing-32-bit-npt-with-64-bit.patch
kvm-x86-remove-emulator-s-broken-checks-on-cr0-cr3-cr4-loads.patch

15 files changed:
queue-5.10/kvm-arm-arm64-fix-kvm_vgic_v3_addr_type_redist-read.patch [new file with mode: 0644]
queue-5.10/kvm-arm64-fix-kvm_vgic_v3_addr_type_redist_region-read.patch [new file with mode: 0644]
queue-5.10/kvm-arm64-fully-zero-the-vcpu-state-on-reset.patch [new file with mode: 0644]
queue-5.10/kvm-destroy-i-o-bus-devices-on-unregister-failure-_after_-sync-ing-srcu.patch [new file with mode: 0644]
queue-5.10/kvm-nsvm-set-the-shadow-root-level-to-the-tdp-level-for-nested-npt.patch [new file with mode: 0644]
queue-5.10/kvm-nvmx-defer-the-mmu-reload-to-the-normal-path-on-an-eptp-switch.patch [new file with mode: 0644]
queue-5.10/kvm-nvmx-truncate-base-index-gpr-value-on-address-calc-in-64-bit.patch [new file with mode: 0644]
queue-5.10/kvm-nvmx-truncate-bits-63-32-of-vmcs-field-on-nested-check-in-64-bit.patch [new file with mode: 0644]
queue-5.10/kvm-stop-looking-for-coalesced-mmio-zones-if-the-bus-is-destroyed.patch [new file with mode: 0644]
queue-5.10/kvm-svm-do-not-allow-sev-sev-es-initialization-after-vcpus-are-created.patch [new file with mode: 0644]
queue-5.10/kvm-svm-don-t-strip-the-c-bit-from-cr2-on-pf-interception.patch [new file with mode: 0644]
queue-5.10/kvm-svm-inject-gp-on-guest-msr_tsc_aux-accesses-if-rdtscp-unsupported.patch [new file with mode: 0644]
queue-5.10/kvm-x86-mmu-alloc-page-for-pdptes-when-shadowing-32-bit-npt-with-64-bit.patch [new file with mode: 0644]
queue-5.10/kvm-x86-remove-emulator-s-broken-checks-on-cr0-cr3-cr4-loads.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/kvm-arm-arm64-fix-kvm_vgic_v3_addr_type_redist-read.patch b/queue-5.10/kvm-arm-arm64-fix-kvm_vgic_v3_addr_type_redist-read.patch
new file mode 100644 (file)
index 0000000..6e0d832
--- /dev/null
@@ -0,0 +1,42 @@
+From 94ac0835391efc1a30feda6fc908913ec012951e Mon Sep 17 00:00:00 2001
+From: Eric Auger <eric.auger@redhat.com>
+Date: Mon, 12 Apr 2021 17:00:34 +0200
+Subject: KVM: arm/arm64: Fix KVM_VGIC_V3_ADDR_TYPE_REDIST read
+
+From: Eric Auger <eric.auger@redhat.com>
+
+commit 94ac0835391efc1a30feda6fc908913ec012951e upstream.
+
+When reading the base address of a REDIST region
+through KVM_VGIC_V3_ADDR_TYPE_REDIST we expect the
+redistributor region list to be populated with a single
+element.
+
+However, list_first_entry() expects the list to be non-empty.
+Instead we should use list_first_entry_or_null(), which
+returns NULL if the list is empty.
+
+Fixes: dbd9733ab674 ("KVM: arm/arm64: Replace the single rdist region by a list")
+Cc: <Stable@vger.kernel.org> # v4.18+
+Signed-off-by: Eric Auger <eric.auger@redhat.com>
+Reported-by: Gavin Shan <gshan@redhat.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20210412150034.29185-1-eric.auger@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/vgic/vgic-kvm-device.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+@@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsig
+                       r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
+                       goto out;
+               }
+-              rdreg = list_first_entry(&vgic->rd_regions,
+-                                       struct vgic_redist_region, list);
++              rdreg = list_first_entry_or_null(&vgic->rd_regions,
++                                               struct vgic_redist_region, list);
+               if (!rdreg)
+                       addr_ptr = &undef_value;
+               else
diff --git a/queue-5.10/kvm-arm64-fix-kvm_vgic_v3_addr_type_redist_region-read.patch b/queue-5.10/kvm-arm64-fix-kvm_vgic_v3_addr_type_redist_region-read.patch
new file mode 100644 (file)
index 0000000..dddcaf3
--- /dev/null
@@ -0,0 +1,39 @@
+From 53b16dd6ba5cf64ed147ac3523ec34651d553cb0 Mon Sep 17 00:00:00 2001
+From: Eric Auger <eric.auger@redhat.com>
+Date: Mon, 5 Apr 2021 18:39:34 +0200
+Subject: KVM: arm64: Fix KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION read
+
+From: Eric Auger <eric.auger@redhat.com>
+
+commit 53b16dd6ba5cf64ed147ac3523ec34651d553cb0 upstream.
+
+The doc says:
+"The characteristics of a specific redistributor region can
+ be read by presetting the index field in the attr data.
+ Only valid for KVM_DEV_TYPE_ARM_VGIC_V3"
+
+Unfortunately the existing code fails to read the input attr data.
+
+Fixes: 04c110932225 ("KVM: arm/arm64: Implement KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION")
+Cc: stable@vger.kernel.org # v4.17+
+Signed-off-by: Eric Auger <eric.auger@redhat.com>
+Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20210405163941.510258-3-eric.auger@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/vgic/vgic-kvm-device.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+@@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct k
+               u64 addr;
+               unsigned long type = (unsigned long)attr->attr;
++              if (copy_from_user(&addr, uaddr, sizeof(addr)))
++                      return -EFAULT;
++
+               r = kvm_vgic_addr(dev->kvm, type, &addr, false);
+               if (r)
+                       return (r == -ENODEV) ? -ENXIO : r;
diff --git a/queue-5.10/kvm-arm64-fully-zero-the-vcpu-state-on-reset.patch b/queue-5.10/kvm-arm64-fully-zero-the-vcpu-state-on-reset.patch
new file mode 100644 (file)
index 0000000..f1d772c
--- /dev/null
@@ -0,0 +1,40 @@
+From 85d703746154cdc6794b6654b587b0b0354c97e9 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Wed, 7 Apr 2021 18:54:16 +0100
+Subject: KVM: arm64: Fully zero the vcpu state on reset
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 85d703746154cdc6794b6654b587b0b0354c97e9 upstream.
+
+On vcpu reset, we expect all the registers to be brought back
+to their initial state, which happens to be a bunch of zeroes.
+
+However, some recent commit broke this, and is now leaving a bunch
+of registers (such as the FP state) with whatever was left by the
+guest. My bad.
+
+Zero the rest of the state (the 32-bit SPSRs and the FPSIMD state).
+
+Cc: stable@vger.kernel.org
+Fixes: e47c2055c68e ("KVM: arm64: Make struct kvm_regs userspace-only")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/reset.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -291,6 +291,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu
+       /* Reset core registers */
+       memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
++      memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
++      vcpu->arch.ctxt.spsr_abt = 0;
++      vcpu->arch.ctxt.spsr_und = 0;
++      vcpu->arch.ctxt.spsr_irq = 0;
++      vcpu->arch.ctxt.spsr_fiq = 0;
+       vcpu_gp_regs(vcpu)->pstate = pstate;
+       /* Reset system registers */
diff --git a/queue-5.10/kvm-destroy-i-o-bus-devices-on-unregister-failure-_after_-sync-ing-srcu.patch b/queue-5.10/kvm-destroy-i-o-bus-devices-on-unregister-failure-_after_-sync-ing-srcu.patch
new file mode 100644 (file)
index 0000000..834fb55
--- /dev/null
@@ -0,0 +1,51 @@
+From 2ee3757424be7c1cd1d0bbfa6db29a7edd82a250 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Mon, 12 Apr 2021 15:20:48 -0700
+Subject: KVM: Destroy I/O bus devices on unregister failure _after_ sync'ing SRCU
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 2ee3757424be7c1cd1d0bbfa6db29a7edd82a250 upstream.
+
+If allocating a new instance of an I/O bus fails when unregistering a
+device, wait to destroy the device until after all readers are guaranteed
+to see the new null bus.  Destroying devices before the bus is nullified
+could lead to use-after-free since readers expect the devices on their
+reference of the bus to remain valid.
+
+Fixes: f65886606c2d ("KVM: fix memory leak in kvm_io_bus_unregister_dev()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210412222050.876100-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -4367,7 +4367,13 @@ void kvm_io_bus_unregister_dev(struct kv
+               new_bus->dev_count--;
+               memcpy(new_bus->range + i, bus->range + i + 1,
+                               flex_array_size(new_bus, range, new_bus->dev_count - i));
+-      } else {
++      }
++
++      rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
++      synchronize_srcu_expedited(&kvm->srcu);
++
++      /* Destroy the old bus _after_ installing the (null) bus. */
++      if (!new_bus) {
+               pr_err("kvm: failed to shrink bus, removing it completely\n");
+               for (j = 0; j < bus->dev_count; j++) {
+                       if (j == i)
+@@ -4376,8 +4382,6 @@ void kvm_io_bus_unregister_dev(struct kv
+               }
+       }
+-      rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+-      synchronize_srcu_expedited(&kvm->srcu);
+       kfree(bus);
+       return;
+ }
diff --git a/queue-5.10/kvm-nsvm-set-the-shadow-root-level-to-the-tdp-level-for-nested-npt.patch b/queue-5.10/kvm-nsvm-set-the-shadow-root-level-to-the-tdp-level-for-nested-npt.patch
new file mode 100644 (file)
index 0000000..e6e5f91
--- /dev/null
@@ -0,0 +1,46 @@
+From a3322d5cd87fef5ec0037fd1b14068a533f9a60f Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Thu, 4 Mar 2021 17:10:45 -0800
+Subject: KVM: nSVM: Set the shadow root level to the TDP level for nested NPT
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit a3322d5cd87fef5ec0037fd1b14068a533f9a60f upstream.
+
+Override the shadow root level in the MMU context when configuring
+NPT for shadowing nested NPT.  The level is always tied to the TDP level
+of the host, not whatever level the guest happens to be using.
+
+Fixes: 096586fda522 ("KVM: nSVM: Correctly set the shadow NPT root level in its MMU role")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210305011101.3597423-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c |   11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4617,12 +4617,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_
+       struct kvm_mmu *context = &vcpu->arch.guest_mmu;
+       union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
+-      context->shadow_root_level = new_role.base.level;
+-
+       __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
+-      if (new_role.as_u64 != context->mmu_role.as_u64)
++      if (new_role.as_u64 != context->mmu_role.as_u64) {
+               shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
++
++              /*
++               * Override the level set by the common init helper, nested TDP
++               * always uses the host's TDP configuration.
++               */
++              context->shadow_root_level = new_role.base.level;
++      }
+ }
+ EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
diff --git a/queue-5.10/kvm-nvmx-defer-the-mmu-reload-to-the-normal-path-on-an-eptp-switch.patch b/queue-5.10/kvm-nvmx-defer-the-mmu-reload-to-the-normal-path-on-an-eptp-switch.patch
new file mode 100644 (file)
index 0000000..626bd90
--- /dev/null
@@ -0,0 +1,47 @@
+From c805f5d5585ab5e0cdac6b1ccf7086eb120fb7db Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Thu, 4 Mar 2021 17:10:57 -0800
+Subject: KVM: nVMX: Defer the MMU reload to the normal path on an EPTP switch
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit c805f5d5585ab5e0cdac6b1ccf7086eb120fb7db upstream.
+
+Defer reloading the MMU after a successful EPTP switch.  The VMFUNC
+instruction itself is executed in the previous EPTP context, so any side
+effects, e.g. updating RIP, should occur in the old context.  Practically
+speaking, this bug is benign as VMX doesn't touch the MMU when skipping
+an emulated instruction, nor does queuing a single-step #DB.  No other
+post-switch side effects exist.
+
+Fixes: 41ab93727467 ("KVM: nVMX: Emulate EPTP switching for the L1 hypervisor")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210305011101.3597423-14-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c |    9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5491,16 +5491,11 @@ static int nested_vmx_eptp_switching(str
+               if (!nested_vmx_check_eptp(vcpu, new_eptp))
+                       return 1;
+-              kvm_mmu_unload(vcpu);
+               mmu->ept_ad = accessed_dirty;
+               mmu->mmu_role.base.ad_disabled = !accessed_dirty;
+               vmcs12->ept_pointer = new_eptp;
+-              /*
+-               * TODO: Check what's the correct approach in case
+-               * mmu reload fails. Currently, we just let the next
+-               * reload potentially fail
+-               */
+-              kvm_mmu_reload(vcpu);
++
++              kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+       }
+       return 0;
diff --git a/queue-5.10/kvm-nvmx-truncate-base-index-gpr-value-on-address-calc-in-64-bit.patch b/queue-5.10/kvm-nvmx-truncate-base-index-gpr-value-on-address-calc-in-64-bit.patch
new file mode 100644 (file)
index 0000000..8d9ff3d
--- /dev/null
@@ -0,0 +1,37 @@
+From 82277eeed65eed6c6ee5b8f97bd978763eab148f Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 21 Apr 2021 19:21:25 -0700
+Subject: KVM: nVMX: Truncate base/index GPR value on address calc in !64-bit
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 82277eeed65eed6c6ee5b8f97bd978763eab148f upstream.
+
+Drop bits 63:32 of the base and/or index GPRs when calculating the
+effective address of a VMX instruction memory operand.  Outside of 64-bit
+mode, memory encodings are strictly limited to E*X and below.
+
+Fixes: 064aea774768 ("KVM: nVMX: Decoding memory operands of VMX instructions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210422022128.3464144-7-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4613,9 +4613,9 @@ int get_vmx_mem_address(struct kvm_vcpu
+       else if (addr_size == 0)
+               off = (gva_t)sign_extend64(off, 15);
+       if (base_is_valid)
+-              off += kvm_register_read(vcpu, base_reg);
++              off += kvm_register_readl(vcpu, base_reg);
+       if (index_is_valid)
+-              off += kvm_register_read(vcpu, index_reg) << scaling;
++              off += kvm_register_readl(vcpu, index_reg) << scaling;
+       vmx_get_segment(vcpu, &s, seg_reg);
+       /*
diff --git a/queue-5.10/kvm-nvmx-truncate-bits-63-32-of-vmcs-field-on-nested-check-in-64-bit.patch b/queue-5.10/kvm-nvmx-truncate-bits-63-32-of-vmcs-field-on-nested-check-in-64-bit.patch
new file mode 100644 (file)
index 0000000..6fb3efa
--- /dev/null
@@ -0,0 +1,38 @@
+From ee050a577523dfd5fac95e6cc182ebe0293ead59 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 21 Apr 2021 19:21:24 -0700
+Subject: KVM: nVMX: Truncate bits 63:32 of VMCS field on nested check in !64-bit
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit ee050a577523dfd5fac95e6cc182ebe0293ead59 upstream.
+
+Drop bits 63:32 of the VMCS field encoding when checking for a nested
+VM-Exit on VMREAD/VMWRITE in !64-bit mode.  VMREAD and VMWRITE always
+use 32-bit operands outside of 64-bit mode.
+
+The actual emulation of VMREAD/VMWRITE does the right thing; this bug is
+purely limited to incorrectly causing a nested VM-Exit if a GPR happens
+to have bits 63:32 set outside of 64-bit mode.
+
+Fixes: a7cde481b6e8 ("KVM: nVMX: Do not forward VMREAD/VMWRITE VMExits to L1 if required so by vmcs12 vmread/vmwrite bitmaps")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210422022128.3464144-6-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5724,7 +5724,7 @@ static bool nested_vmx_exit_handled_vmcs
+       /* Decode instruction info and find the field to access */
+       vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+-      field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
++      field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+       /* Out-of-range fields always cause a VM exit from L2 to L1 */
+       if (field >> 15)
diff --git a/queue-5.10/kvm-stop-looking-for-coalesced-mmio-zones-if-the-bus-is-destroyed.patch b/queue-5.10/kvm-stop-looking-for-coalesced-mmio-zones-if-the-bus-is-destroyed.patch
new file mode 100644 (file)
index 0000000..d21c4bc
--- /dev/null
@@ -0,0 +1,128 @@
+From 5d3c4c79384af06e3c8e25b7770b6247496b4417 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Mon, 12 Apr 2021 15:20:49 -0700
+Subject: KVM: Stop looking for coalesced MMIO zones if the bus is destroyed
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 5d3c4c79384af06e3c8e25b7770b6247496b4417 upstream.
+
+Abort the walk of coalesced MMIO zones if kvm_io_bus_unregister_dev()
+fails to allocate memory for the new instance of the bus.  If it can't
+instantiate a new bus, unregister_dev() destroys all devices _except_ the
+target device.   But, it doesn't tell the caller that it obliterated the
+bus and invoked the destructor for all devices that were on the bus.  In
+the coalesced MMIO case, this can result in a deleted list entry
+dereference due to attempting to continue iterating on coalesced_zones
+after future entries (in the walk) have been deleted.
+
+Opportunistically add curly braces to the for-loop, which encompasses
+many lines but sneaks by without braces due to the guts being a single
+if statement.
+
+Fixes: f65886606c2d ("KVM: fix memory leak in kvm_io_bus_unregister_dev()")
+Cc: stable@vger.kernel.org
+Reported-by: Hao Sun <sunhao.th@gmail.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210412222050.876100-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/kvm_host.h  |    4 ++--
+ virt/kvm/coalesced_mmio.c |   19 +++++++++++++++++--
+ virt/kvm/kvm_main.c       |   10 +++++-----
+ 3 files changed, 24 insertions(+), 9 deletions(-)
+
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -190,8 +190,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcp
+                   int len, void *val);
+ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+                           int len, struct kvm_io_device *dev);
+-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-                             struct kvm_io_device *dev);
++int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++                            struct kvm_io_device *dev);
+ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+                                        gpa_t addr);
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -174,21 +174,36 @@ int kvm_vm_ioctl_unregister_coalesced_mm
+                                          struct kvm_coalesced_mmio_zone *zone)
+ {
+       struct kvm_coalesced_mmio_dev *dev, *tmp;
++      int r;
+       if (zone->pio != 1 && zone->pio != 0)
+               return -EINVAL;
+       mutex_lock(&kvm->slots_lock);
+-      list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
++      list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
+               if (zone->pio == dev->zone.pio &&
+                   coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+-                      kvm_io_bus_unregister_dev(kvm,
++                      r = kvm_io_bus_unregister_dev(kvm,
+                               zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
+                       kvm_iodevice_destructor(&dev->dev);
++
++                      /*
++                       * On failure, unregister destroys all devices on the
++                       * bus _except_ the target device, i.e. coalesced_zones
++                       * has been modified.  No need to restart the walk as
++                       * there aren't any zones left.
++                       */
++                      if (r)
++                              break;
+               }
++      }
+       mutex_unlock(&kvm->slots_lock);
++      /*
++       * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
++       * perspective, the coalesced MMIO is most definitely unregistered.
++       */
+       return 0;
+ }
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -4342,15 +4342,15 @@ int kvm_io_bus_register_dev(struct kvm *
+ }
+ /* Caller must hold slots_lock. */
+-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-                             struct kvm_io_device *dev)
++int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++                            struct kvm_io_device *dev)
+ {
+       int i, j;
+       struct kvm_io_bus *new_bus, *bus;
+       bus = kvm_get_bus(kvm, bus_idx);
+       if (!bus)
+-              return;
++              return 0;
+       for (i = 0; i < bus->dev_count; i++)
+               if (bus->range[i].dev == dev) {
+@@ -4358,7 +4358,7 @@ void kvm_io_bus_unregister_dev(struct kv
+               }
+       if (i == bus->dev_count)
+-              return;
++              return 0;
+       new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
+                         GFP_KERNEL_ACCOUNT);
+@@ -4383,7 +4383,7 @@ void kvm_io_bus_unregister_dev(struct kv
+       }
+       kfree(bus);
+-      return;
++      return new_bus ? 0 : -ENOMEM;
+ }
+ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
diff --git a/queue-5.10/kvm-svm-do-not-allow-sev-sev-es-initialization-after-vcpus-are-created.patch b/queue-5.10/kvm-svm-do-not-allow-sev-sev-es-initialization-after-vcpus-are-created.patch
new file mode 100644 (file)
index 0000000..661d283
--- /dev/null
@@ -0,0 +1,42 @@
+From 8727906fde6ea665b52e68ddc58833772537f40a Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 30 Mar 2021 20:19:36 -0700
+Subject: KVM: SVM: Do not allow SEV/SEV-ES initialization after vCPUs are created
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 8727906fde6ea665b52e68ddc58833772537f40a upstream.
+
+Reject KVM_SEV_INIT and KVM_SEV_ES_INIT if they are attempted after one
+or more vCPUs have been created.  KVM assumes a VM is tagged SEV/SEV-ES
+prior to vCPU creation, e.g. init_vmcb() needs to mark the VMCB as SEV
+enabled, and svm_create_vcpu() needs to allocate the VMSA.  At best,
+creating vCPUs before SEV/SEV-ES init will lead to unexpected errors
+and/or behavior, and at worst it will crash the host, e.g.
+sev_launch_update_vmsa() will dereference a null svm->vmsa pointer.
+
+Fixes: 1654efcbc431 ("KVM: SVM: Add KVM_SEV_INIT command")
+Fixes: ad73109ae7ec ("KVM: SVM: Provide support to launch and run an SEV-ES guest")
+Cc: stable@vger.kernel.org
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210331031936.2495277-4-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/sev.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -168,6 +168,9 @@ static int sev_guest_init(struct kvm *kv
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       int asid, ret;
++      if (kvm->created_vcpus)
++              return -EINVAL;
++
+       ret = -EBUSY;
+       if (unlikely(sev->active))
+               return ret;
diff --git a/queue-5.10/kvm-svm-don-t-strip-the-c-bit-from-cr2-on-pf-interception.patch b/queue-5.10/kvm-svm-don-t-strip-the-c-bit-from-cr2-on-pf-interception.patch
new file mode 100644 (file)
index 0000000..63a09d9
--- /dev/null
@@ -0,0 +1,35 @@
+From 6d1b867d045699d6ce0dfa0ef35d1b87dd36db56 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Thu, 4 Mar 2021 17:10:56 -0800
+Subject: KVM: SVM: Don't strip the C-bit from CR2 on #PF interception
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 6d1b867d045699d6ce0dfa0ef35d1b87dd36db56 upstream.
+
+Don't strip the C-bit from the faulting address on an intercepted #PF;
+the address is a virtual address, not a physical address.
+
+Fixes: 0ede79e13224 ("KVM: SVM: Clear C-bit from the page fault address")
+Cc: stable@vger.kernel.org
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210305011101.3597423-13-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1805,7 +1805,7 @@ static void svm_set_dr7(struct kvm_vcpu
+ static int pf_interception(struct vcpu_svm *svm)
+ {
+-      u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
++      u64 fault_address = svm->vmcb->control.exit_info_2;
+       u64 error_code = svm->vmcb->control.exit_info_1;
+       return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
diff --git a/queue-5.10/kvm-svm-inject-gp-on-guest-msr_tsc_aux-accesses-if-rdtscp-unsupported.patch b/queue-5.10/kvm-svm-inject-gp-on-guest-msr_tsc_aux-accesses-if-rdtscp-unsupported.patch
new file mode 100644 (file)
index 0000000..8177992
--- /dev/null
@@ -0,0 +1,46 @@
+From 6f2b296aa6432d8274e258cc3220047ca04f5de0 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 23 Apr 2021 15:34:01 -0700
+Subject: KVM: SVM: Inject #GP on guest MSR_TSC_AUX accesses if RDTSCP unsupported
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 6f2b296aa6432d8274e258cc3220047ca04f5de0 upstream.
+
+Inject #GP on guest accesses to MSR_TSC_AUX if RDTSCP is unsupported in
+the guest's CPUID model.
+
+Fixes: 46896c73c1a4 ("KVM: svm: add support for RDTSCP")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210423223404.3860547-2-seanjc@google.com>
+Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2519,6 +2519,9 @@ static int svm_get_msr(struct kvm_vcpu *
+       case MSR_TSC_AUX:
+               if (!boot_cpu_has(X86_FEATURE_RDTSCP))
+                       return 1;
++              if (!msr_info->host_initiated &&
++                  !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++                      return 1;
+               msr_info->data = svm->tsc_aux;
+               break;
+       /*
+@@ -2713,6 +2716,10 @@ static int svm_set_msr(struct kvm_vcpu *
+               if (!boot_cpu_has(X86_FEATURE_RDTSCP))
+                       return 1;
++              if (!msr->host_initiated &&
++                  !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++                      return 1;
++
+               /*
+                * This is rare, so we update the MSR here instead of using
+                * direct_access_msrs.  Doing that would require a rdmsr in
diff --git a/queue-5.10/kvm-x86-mmu-alloc-page-for-pdptes-when-shadowing-32-bit-npt-with-64-bit.patch b/queue-5.10/kvm-x86-mmu-alloc-page-for-pdptes-when-shadowing-32-bit-npt-with-64-bit.patch
new file mode 100644 (file)
index 0000000..bfc41fc
--- /dev/null
@@ -0,0 +1,118 @@
+From 04d45551a1eefbea42655da52f56e846c0af721a Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Thu, 4 Mar 2021 17:10:46 -0800
+Subject: KVM: x86/mmu: Alloc page for PDPTEs when shadowing 32-bit NPT with 64-bit
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 04d45551a1eefbea42655da52f56e846c0af721a upstream.
+
+Allocate the so-called pae_root page on-demand, along with the lm_root
+page, when shadowing 32-bit NPT with 64-bit NPT, i.e. when running a
+32-bit L1.  KVM currently only allocates the page when NPT is disabled,
+or when L0 is 32-bit (using PAE paging).
+
+Note, there is an existing memory leak involving the MMU roots, as KVM
+fails to free the PAE roots on failure.  This will be addressed in a
+future commit.
+
+Fixes: ee6268ba3a68 ("KVM: x86: Skip pae_root shadow allocation if tdp enabled")
+Fixes: b6b80c78af83 ("KVM: x86/mmu: Allocate PAE root array when using SVM's 32-bit NPT")
+Cc: stable@vger.kernel.org
+Reviewed-by: Ben Gardon <bgardon@google.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210305011101.3597423-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c |   44 +++++++++++++++++++++++++++++---------------
+ 1 file changed, 29 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -3195,14 +3195,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu
+               if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
+                   (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
+                       mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
+-              } else {
++              } else if (mmu->pae_root) {
+                       for (i = 0; i < 4; ++i)
+                               if (mmu->pae_root[i] != 0)
+                                       mmu_free_root_page(kvm,
+                                                          &mmu->pae_root[i],
+                                                          &invalid_list);
+-                      mmu->root_hpa = INVALID_PAGE;
+               }
++              mmu->root_hpa = INVALID_PAGE;
+               mmu->root_pgd = 0;
+       }
+@@ -3314,9 +3314,23 @@ static int mmu_alloc_shadow_roots(struct
+        * the shadow page table may be a PAE or a long mode page table.
+        */
+       pm_mask = PT_PRESENT_MASK;
+-      if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
++      if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+               pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
++              /*
++               * Allocate the page for the PDPTEs when shadowing 32-bit NPT
++               * with 64-bit only when needed.  Unlike 32-bit NPT, it doesn't
++               * need to be in low mem.  See also lm_root below.
++               */
++              if (!vcpu->arch.mmu->pae_root) {
++                      WARN_ON_ONCE(!tdp_enabled);
++
++                      vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
++                      if (!vcpu->arch.mmu->pae_root)
++                              return -ENOMEM;
++              }
++      }
++
+       for (i = 0; i < 4; ++i) {
+               MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
+               if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
+@@ -3339,21 +3353,19 @@ static int mmu_alloc_shadow_roots(struct
+       vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
+       /*
+-       * If we shadow a 32 bit page table with a long mode page
+-       * table we enter this path.
++       * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
++       * tables are allocated and initialized at MMU creation as there is no
++       * equivalent level in the guest's NPT to shadow.  Allocate the tables
++       * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
++       * handled above (to share logic with PAE), deal with the PML4 here.
+        */
+       if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+               if (vcpu->arch.mmu->lm_root == NULL) {
+-                      /*
+-                       * The additional page necessary for this is only
+-                       * allocated on demand.
+-                       */
+-
+                       u64 *lm_root;
+                       lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+-                      if (lm_root == NULL)
+-                              return 1;
++                      if (!lm_root)
++                              return -ENOMEM;
+                       lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
+@@ -5297,9 +5309,11 @@ static int __kvm_mmu_create(struct kvm_v
+        * while the PDP table is a per-vCPU construct that's allocated at MMU
+        * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
+        * x86_64.  Therefore we need to allocate the PDP table in the first
+-       * 4GB of memory, which happens to fit the DMA32 zone.  Except for
+-       * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
+-       * skip allocating the PDP table.
++       * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
++       * generally doesn't use PAE paging and can skip allocating the PDP
++       * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
++       * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
++       * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
+        */
+       if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+               return 0;
diff --git a/queue-5.10/kvm-x86-remove-emulator-s-broken-checks-on-cr0-cr3-cr4-loads.patch b/queue-5.10/kvm-x86-remove-emulator-s-broken-checks-on-cr0-cr3-cr4-loads.patch
new file mode 100644 (file)
index 0000000..25a127e
--- /dev/null
@@ -0,0 +1,132 @@
+From d0fe7b6404408835ed60232cb3bf28324b2f95db Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 21 Apr 2021 19:21:20 -0700
+Subject: KVM: x86: Remove emulator's broken checks on CR0/CR3/CR4 loads
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit d0fe7b6404408835ed60232cb3bf28324b2f95db upstream.
+
+Remove the emulator's checks for illegal CR0, CR3, and CR4 values, as
+the checks are redundant, outdated, and in the case of SEV's C-bit,
+broken.  The emulator manually calculates MAXPHYADDR from CPUID and
+neglects to mask off the C-bit.  For all other checks, kvm_set_cr*() are
+a superset of the emulator checks, e.g. see CR4.LA57.
+
+Fixes: a780a3ea6282 ("KVM: X86: Fix reserved bits check for MOV to CR3")
+Cc: Babu Moger <babu.moger@amd.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210422022128.3464144-2-seanjc@google.com>
+Cc: stable@vger.kernel.org
+[Unify check_cr_read and check_cr_write. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/emulate.c |   80 +------------------------------------------------
+ 1 file changed, 3 insertions(+), 77 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4220,7 +4220,7 @@ static bool valid_cr(int nr)
+       }
+ }
+-static int check_cr_read(struct x86_emulate_ctxt *ctxt)
++static int check_cr_access(struct x86_emulate_ctxt *ctxt)
+ {
+       if (!valid_cr(ctxt->modrm_reg))
+               return emulate_ud(ctxt);
+@@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emul
+       return X86EMUL_CONTINUE;
+ }
+-static int check_cr_write(struct x86_emulate_ctxt *ctxt)
+-{
+-      u64 new_val = ctxt->src.val64;
+-      int cr = ctxt->modrm_reg;
+-      u64 efer = 0;
+-
+-      static u64 cr_reserved_bits[] = {
+-              0xffffffff00000000ULL,
+-              0, 0, 0, /* CR3 checked later */
+-              CR4_RESERVED_BITS,
+-              0, 0, 0,
+-              CR8_RESERVED_BITS,
+-      };
+-
+-      if (!valid_cr(cr))
+-              return emulate_ud(ctxt);
+-
+-      if (new_val & cr_reserved_bits[cr])
+-              return emulate_gp(ctxt, 0);
+-
+-      switch (cr) {
+-      case 0: {
+-              u64 cr4;
+-              if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
+-                  ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
+-                      return emulate_gp(ctxt, 0);
+-
+-              cr4 = ctxt->ops->get_cr(ctxt, 4);
+-              ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-
+-              if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
+-                  !(cr4 & X86_CR4_PAE))
+-                      return emulate_gp(ctxt, 0);
+-
+-              break;
+-              }
+-      case 3: {
+-              u64 rsvd = 0;
+-
+-              ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-              if (efer & EFER_LMA) {
+-                      u64 maxphyaddr;
+-                      u32 eax, ebx, ecx, edx;
+-
+-                      eax = 0x80000008;
+-                      ecx = 0;
+-                      if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
+-                                               &edx, true))
+-                              maxphyaddr = eax & 0xff;
+-                      else
+-                              maxphyaddr = 36;
+-                      rsvd = rsvd_bits(maxphyaddr, 63);
+-                      if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
+-                              rsvd &= ~X86_CR3_PCID_NOFLUSH;
+-              }
+-
+-              if (new_val & rsvd)
+-                      return emulate_gp(ctxt, 0);
+-
+-              break;
+-              }
+-      case 4: {
+-              ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-
+-              if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
+-                      return emulate_gp(ctxt, 0);
+-
+-              break;
+-              }
+-      }
+-
+-      return X86EMUL_CONTINUE;
+-}
+-
+ static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
+ {
+       unsigned long dr7;
+@@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table
+       D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
+       D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
+       /* 0x20 - 0x2F */
+-      DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
++      DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
+       DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
+       IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
+-                                              check_cr_write),
++                                              check_cr_access),
+       IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
+                                               check_dr_write),
+       N, N, N, N,
diff --git a/queue-5.10/series b/queue-5.10/series
index 08e4bb94ca7276b782dbca39577184b75c6a61d7..79b424c17594b78d96138b6137ed2d6858e378f0 100644 (file)
@@ -88,3 +88,17 @@ s390-fix-detection-of-vector-enhancements-facility-1-vs.-vector-packed-decimal-f
 kvm-s390-vsie-fix-mvpg-handling-for-prefixing-and-mso.patch
 kvm-s390-split-kvm_s390_real_to_abs.patch
 kvm-s390-extend-kvm_s390_shadow_fault-to-return-entry-pointer.patch
+kvm-x86-mmu-alloc-page-for-pdptes-when-shadowing-32-bit-npt-with-64-bit.patch
+kvm-x86-remove-emulator-s-broken-checks-on-cr0-cr3-cr4-loads.patch
+kvm-nsvm-set-the-shadow-root-level-to-the-tdp-level-for-nested-npt.patch
+kvm-svm-don-t-strip-the-c-bit-from-cr2-on-pf-interception.patch
+kvm-svm-do-not-allow-sev-sev-es-initialization-after-vcpus-are-created.patch
+kvm-svm-inject-gp-on-guest-msr_tsc_aux-accesses-if-rdtscp-unsupported.patch
+kvm-nvmx-defer-the-mmu-reload-to-the-normal-path-on-an-eptp-switch.patch
+kvm-nvmx-truncate-bits-63-32-of-vmcs-field-on-nested-check-in-64-bit.patch
+kvm-nvmx-truncate-base-index-gpr-value-on-address-calc-in-64-bit.patch
+kvm-arm-arm64-fix-kvm_vgic_v3_addr_type_redist-read.patch
+kvm-destroy-i-o-bus-devices-on-unregister-failure-_after_-sync-ing-srcu.patch
+kvm-stop-looking-for-coalesced-mmio-zones-if-the-bus-is-destroyed.patch
+kvm-arm64-fully-zero-the-vcpu-state-on-reset.patch
+kvm-arm64-fix-kvm_vgic_v3_addr_type_redist_region-read.patch