]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 19 Jul 2021 09:14:48 +0000 (11:14 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 19 Jul 2021 09:14:48 +0000 (11:14 +0200)
added patches:
cifs-handle-reconnect-of-tcon-when-there-is-no-cached-dfs-referral.patch
kvm-mmio-fix-use-after-free-read-in-kvm_vm_ioctl_unregister_coalesced_mmio.patch
kvm-nsvm-check-the-value-written-to-msr_vm_hsave_pa.patch
kvm-x86-disable-hardware-breakpoints-unconditionally-before-kvm_x86-run.patch
kvm-x86-mmu-do-not-apply-hpa-memory-encryption-mask-to-gpas.patch
kvm-x86-use-guest-maxphyaddr-from-cpuid.0x8000_0008-iff-tdp-is-enabled.patch

queue-5.10/cifs-handle-reconnect-of-tcon-when-there-is-no-cached-dfs-referral.patch [new file with mode: 0644]
queue-5.10/kvm-mmio-fix-use-after-free-read-in-kvm_vm_ioctl_unregister_coalesced_mmio.patch [new file with mode: 0644]
queue-5.10/kvm-nsvm-check-the-value-written-to-msr_vm_hsave_pa.patch [new file with mode: 0644]
queue-5.10/kvm-x86-disable-hardware-breakpoints-unconditionally-before-kvm_x86-run.patch [new file with mode: 0644]
queue-5.10/kvm-x86-mmu-do-not-apply-hpa-memory-encryption-mask-to-gpas.patch [new file with mode: 0644]
queue-5.10/kvm-x86-use-guest-maxphyaddr-from-cpuid.0x8000_0008-iff-tdp-is-enabled.patch [new file with mode: 0644]
queue-5.10/series [new file with mode: 0644]

diff --git a/queue-5.10/cifs-handle-reconnect-of-tcon-when-there-is-no-cached-dfs-referral.patch b/queue-5.10/cifs-handle-reconnect-of-tcon-when-there-is-no-cached-dfs-referral.patch
new file mode 100644 (file)
index 0000000..3b3883a
--- /dev/null
@@ -0,0 +1,42 @@
+From 507345b5ae6a57b7ecd7550ff39282ed20de7b8d Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@cjr.nz>
+Date: Mon, 12 Jul 2021 12:38:24 -0300
+Subject: cifs: handle reconnect of tcon when there is no cached dfs referral
+
+From: Paulo Alcantara <pc@cjr.nz>
+
+commit 507345b5ae6a57b7ecd7550ff39282ed20de7b8d upstream.
+
+When there is no cached DFS referral of tcon->dfs_path, then reconnect
+to same share.
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/connect.c |    6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -5352,7 +5352,8 @@ int cifs_tree_connect(const unsigned int
+       if (!tree)
+               return -ENOMEM;
+-      if (!tcon->dfs_path) {
++      /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
++      if (!tcon->dfs_path || dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl)) {
+               if (tcon->ipc) {
+                       scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+                       rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
+@@ -5362,9 +5363,6 @@ int cifs_tree_connect(const unsigned int
+               goto out;
+       }
+-      rc = dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl);
+-      if (rc)
+-              goto out;
+       isroot = ref.server_type == DFS_TYPE_ROOT;
+       free_dfs_info_param(&ref);
diff --git a/queue-5.10/kvm-mmio-fix-use-after-free-read-in-kvm_vm_ioctl_unregister_coalesced_mmio.patch b/queue-5.10/kvm-mmio-fix-use-after-free-read-in-kvm_vm_ioctl_unregister_coalesced_mmio.patch
new file mode 100644 (file)
index 0000000..2bceab2
--- /dev/null
@@ -0,0 +1,128 @@
+From 23fa2e46a5556f787ce2ea1a315d3ab93cced204 Mon Sep 17 00:00:00 2001
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Date: Sat, 26 Jun 2021 15:03:04 +0800
+Subject: KVM: mmio: Fix use-after-free Read in kvm_vm_ioctl_unregister_coalesced_mmio
+
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+
+commit 23fa2e46a5556f787ce2ea1a315d3ab93cced204 upstream.
+
+BUG: KASAN: use-after-free in kvm_vm_ioctl_unregister_coalesced_mmio+0x7c/0x1ec arch/arm64/kvm/../../../virt/kvm/coalesced_mmio.c:183
+Read of size 8 at addr ffff0000c03a2500 by task syz-executor083/4269
+
+CPU: 5 PID: 4269 Comm: syz-executor083 Not tainted 5.10.0 #7
+Hardware name: linux,dummy-virt (DT)
+Call trace:
+ dump_backtrace+0x0/0x2d0 arch/arm64/kernel/stacktrace.c:132
+ show_stack+0x28/0x34 arch/arm64/kernel/stacktrace.c:196
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x110/0x164 lib/dump_stack.c:118
+ print_address_description+0x78/0x5c8 mm/kasan/report.c:385
+ __kasan_report mm/kasan/report.c:545 [inline]
+ kasan_report+0x148/0x1e4 mm/kasan/report.c:562
+ check_memory_region_inline mm/kasan/generic.c:183 [inline]
+ __asan_load8+0xb4/0xbc mm/kasan/generic.c:252
+ kvm_vm_ioctl_unregister_coalesced_mmio+0x7c/0x1ec arch/arm64/kvm/../../../virt/kvm/coalesced_mmio.c:183
+ kvm_vm_ioctl+0xe30/0x14c4 arch/arm64/kvm/../../../virt/kvm/kvm_main.c:3755
+ vfs_ioctl fs/ioctl.c:48 [inline]
+ __do_sys_ioctl fs/ioctl.c:753 [inline]
+ __se_sys_ioctl fs/ioctl.c:739 [inline]
+ __arm64_sys_ioctl+0xf88/0x131c fs/ioctl.c:739
+ __invoke_syscall arch/arm64/kernel/syscall.c:36 [inline]
+ invoke_syscall arch/arm64/kernel/syscall.c:48 [inline]
+ el0_svc_common arch/arm64/kernel/syscall.c:158 [inline]
+ do_el0_svc+0x120/0x290 arch/arm64/kernel/syscall.c:220
+ el0_svc+0x1c/0x28 arch/arm64/kernel/entry-common.c:367
+ el0_sync_handler+0x98/0x170 arch/arm64/kernel/entry-common.c:383
+ el0_sync+0x140/0x180 arch/arm64/kernel/entry.S:670
+
+Allocated by task 4269:
+ stack_trace_save+0x80/0xb8 kernel/stacktrace.c:121
+ kasan_save_stack mm/kasan/common.c:48 [inline]
+ kasan_set_track mm/kasan/common.c:56 [inline]
+ __kasan_kmalloc+0xdc/0x120 mm/kasan/common.c:461
+ kasan_kmalloc+0xc/0x14 mm/kasan/common.c:475
+ kmem_cache_alloc_trace include/linux/slab.h:450 [inline]
+ kmalloc include/linux/slab.h:552 [inline]
+ kzalloc include/linux/slab.h:664 [inline]
+ kvm_vm_ioctl_register_coalesced_mmio+0x78/0x1cc arch/arm64/kvm/../../../virt/kvm/coalesced_mmio.c:146
+ kvm_vm_ioctl+0x7e8/0x14c4 arch/arm64/kvm/../../../virt/kvm/kvm_main.c:3746
+ vfs_ioctl fs/ioctl.c:48 [inline]
+ __do_sys_ioctl fs/ioctl.c:753 [inline]
+ __se_sys_ioctl fs/ioctl.c:739 [inline]
+ __arm64_sys_ioctl+0xf88/0x131c fs/ioctl.c:739
+ __invoke_syscall arch/arm64/kernel/syscall.c:36 [inline]
+ invoke_syscall arch/arm64/kernel/syscall.c:48 [inline]
+ el0_svc_common arch/arm64/kernel/syscall.c:158 [inline]
+ do_el0_svc+0x120/0x290 arch/arm64/kernel/syscall.c:220
+ el0_svc+0x1c/0x28 arch/arm64/kernel/entry-common.c:367
+ el0_sync_handler+0x98/0x170 arch/arm64/kernel/entry-common.c:383
+ el0_sync+0x140/0x180 arch/arm64/kernel/entry.S:670
+
+Freed by task 4269:
+ stack_trace_save+0x80/0xb8 kernel/stacktrace.c:121
+ kasan_save_stack mm/kasan/common.c:48 [inline]
+ kasan_set_track+0x38/0x6c mm/kasan/common.c:56
+ kasan_set_free_info+0x20/0x40 mm/kasan/generic.c:355
+ __kasan_slab_free+0x124/0x150 mm/kasan/common.c:422
+ kasan_slab_free+0x10/0x1c mm/kasan/common.c:431
+ slab_free_hook mm/slub.c:1544 [inline]
+ slab_free_freelist_hook mm/slub.c:1577 [inline]
+ slab_free mm/slub.c:3142 [inline]
+ kfree+0x104/0x38c mm/slub.c:4124
+ coalesced_mmio_destructor+0x94/0xa4 arch/arm64/kvm/../../../virt/kvm/coalesced_mmio.c:102
+ kvm_iodevice_destructor include/kvm/iodev.h:61 [inline]
+ kvm_io_bus_unregister_dev+0x248/0x280 arch/arm64/kvm/../../../virt/kvm/kvm_main.c:4374
+ kvm_vm_ioctl_unregister_coalesced_mmio+0x158/0x1ec arch/arm64/kvm/../../../virt/kvm/coalesced_mmio.c:186
+ kvm_vm_ioctl+0xe30/0x14c4 arch/arm64/kvm/../../../virt/kvm/kvm_main.c:3755
+ vfs_ioctl fs/ioctl.c:48 [inline]
+ __do_sys_ioctl fs/ioctl.c:753 [inline]
+ __se_sys_ioctl fs/ioctl.c:739 [inline]
+ __arm64_sys_ioctl+0xf88/0x131c fs/ioctl.c:739
+ __invoke_syscall arch/arm64/kernel/syscall.c:36 [inline]
+ invoke_syscall arch/arm64/kernel/syscall.c:48 [inline]
+ el0_svc_common arch/arm64/kernel/syscall.c:158 [inline]
+ do_el0_svc+0x120/0x290 arch/arm64/kernel/syscall.c:220
+ el0_svc+0x1c/0x28 arch/arm64/kernel/entry-common.c:367
+ el0_sync_handler+0x98/0x170 arch/arm64/kernel/entry-common.c:383
+ el0_sync+0x140/0x180 arch/arm64/kernel/entry.S:670
+
+If kvm_io_bus_unregister_dev() return -ENOMEM, we already call kvm_iodevice_destructor()
+inside this function to delete 'struct kvm_coalesced_mmio_dev *dev' from list
+and free the dev, but kvm_iodevice_destructor() is called again, which will
+lead to the above issue.
+
+Let's check the return value of kvm_io_bus_unregister_dev(), and only call
+kvm_iodevice_destructor() if the return value is 0.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: kvm@vger.kernel.org
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Message-Id: <20210626070304.143456-1-wangkefeng.wang@huawei.com>
+Cc: stable@vger.kernel.org
+Fixes: 5d3c4c79384a ("KVM: Stop looking for coalesced MMIO zones if the bus is destroyed", 2021-04-20)
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/coalesced_mmio.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -186,7 +186,6 @@ int kvm_vm_ioctl_unregister_coalesced_mm
+                   coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+                       r = kvm_io_bus_unregister_dev(kvm,
+                               zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
+-                      kvm_iodevice_destructor(&dev->dev);
+                       /*
+                        * On failure, unregister destroys all devices on the
+@@ -196,6 +195,7 @@ int kvm_vm_ioctl_unregister_coalesced_mm
+                        */
+                       if (r)
+                               break;
++                      kvm_iodevice_destructor(&dev->dev);
+               }
+       }
diff --git a/queue-5.10/kvm-nsvm-check-the-value-written-to-msr_vm_hsave_pa.patch b/queue-5.10/kvm-nsvm-check-the-value-written-to-msr_vm_hsave_pa.patch
new file mode 100644 (file)
index 0000000..bef965b
--- /dev/null
@@ -0,0 +1,46 @@
+From fce7e152ffc8f89d02a80617b16c7aa1527847c8 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Mon, 28 Jun 2021 12:44:20 +0200
+Subject: KVM: nSVM: Check the value written to MSR_VM_HSAVE_PA
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit fce7e152ffc8f89d02a80617b16c7aa1527847c8 upstream.
+
+APM states that #GP is raised upon write to MSR_VM_HSAVE_PA when
+the supplied address is not page-aligned or is outside of "maximum
+supported physical address for this implementation".
+page_address_valid() check seems suitable. Also, forcefully page-align
+the address when it's written from VMM.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20210628104425.391276-2-vkuznets@redhat.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+[Add comment about behavior for host-provided values. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2745,7 +2745,16 @@ static int svm_set_msr(struct kvm_vcpu *
+                       svm_disable_lbrv(vcpu);
+               break;
+       case MSR_VM_HSAVE_PA:
+-              svm->nested.hsave_msr = data;
++              /*
++               * Old kernels did not validate the value written to
++               * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
++               * value to allow live migrating buggy or malicious guests
++               * originating from those kernels.
++               */
++              if (!msr->host_initiated && !page_address_valid(vcpu, data))
++                      return 1;
++
++              svm->nested.hsave_msr = data & PAGE_MASK;
+               break;
+       case MSR_VM_CR:
+               return svm_set_vm_cr(vcpu, data);
diff --git a/queue-5.10/kvm-x86-disable-hardware-breakpoints-unconditionally-before-kvm_x86-run.patch b/queue-5.10/kvm-x86-disable-hardware-breakpoints-unconditionally-before-kvm_x86-run.patch
new file mode 100644 (file)
index 0000000..5e26ce1
--- /dev/null
@@ -0,0 +1,49 @@
+From f85d40160691881a17a397c448d799dfc90987ba Mon Sep 17 00:00:00 2001
+From: Lai Jiangshan <laijs@linux.alibaba.com>
+Date: Tue, 29 Jun 2021 01:26:32 +0800
+Subject: KVM: X86: Disable hardware breakpoints unconditionally before kvm_x86->run()
+
+From: Lai Jiangshan <laijs@linux.alibaba.com>
+
+commit f85d40160691881a17a397c448d799dfc90987ba upstream.
+
+When the host is using debug registers but the guest is not using them
+nor is the guest in guest-debug state, the kvm code does not reset
+the host debug registers before kvm_x86->run().  Rather, it relies on
+the hardware vmentry instruction to automatically reset the dr7 registers
+which ensures that the host breakpoints do not affect the guest.
+
+This however violates the non-instrumentable nature around VM entry
+and exit; for example, when a host breakpoint is set on vcpu->arch.cr2,
+
+Another issue is consistency.  When the guest debug registers are active,
+the host breakpoints are reset before kvm_x86->run(). But when the
+guest debug registers are inactive, the host breakpoints are delayed to
+be disabled.  The host tracing tools may see different results depending
+on what the guest is doing.
+
+To fix the problems, we clear %db7 unconditionally before kvm_x86->run()
+if the host has set any breakpoints, no matter if the guest is using
+them or not.
+
+Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
+Message-Id: <20210628172632.81029-1-jiangshanlai@gmail.com>
+Cc: stable@vger.kernel.org
+[Only clear %db7 instead of reloading all debug registers. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9020,6 +9020,8 @@ static int vcpu_enter_guest(struct kvm_v
+               set_debugreg(vcpu->arch.eff_db[3], 3);
+               set_debugreg(vcpu->arch.dr6, 6);
+               vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
++      } else if (unlikely(hw_breakpoint_active())) {
++              set_debugreg(0, 7);
+       }
+       exit_fastpath = kvm_x86_ops.run(vcpu);
diff --git a/queue-5.10/kvm-x86-mmu-do-not-apply-hpa-memory-encryption-mask-to-gpas.patch b/queue-5.10/kvm-x86-mmu-do-not-apply-hpa-memory-encryption-mask-to-gpas.patch
new file mode 100644 (file)
index 0000000..27efa8b
--- /dev/null
@@ -0,0 +1,111 @@
+From fc9bf2e087efcd81bda2e52d09616d2a1bf982a8 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 23 Jun 2021 16:05:49 -0700
+Subject: KVM: x86/mmu: Do not apply HPA (memory encryption) mask to GPAs
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit fc9bf2e087efcd81bda2e52d09616d2a1bf982a8 upstream.
+
+Ignore "dynamic" host adjustments to the physical address mask when
+generating the masks for guest PTEs, i.e. the guest PA masks.  The host
+physical address space and guest physical address space are two different
+beasts, e.g. even though SEV's C-bit is the same bit location for both
+host and guest, disabling SME in the host (which clears shadow_me_mask)
+does not affect the guest PTE->GPA "translation".
+
+For non-SEV guests, not dropping bits is the correct behavior.  Assuming
+KVM and userspace correctly enumerate/configure guest MAXPHYADDR, bits
+that are lost as collateral damage from memory encryption are treated as
+reserved bits, i.e. KVM will never get to the point where it attempts to
+generate a gfn using the affected bits.  And if userspace wants to create
+a bogus vCPU, then userspace gets to deal with the fallout of hardware
+doing odd things with bad GPAs.
+
+For SEV guests, not dropping the C-bit is technically wrong, but it's a
+moot point because KVM can't read SEV guest's page tables in any case
+since they're always encrypted.  Not to mention that the current KVM code
+is also broken since sme_me_mask does not have to be non-zero for SEV to
+be supported by KVM.  The proper fix would be to teach all of KVM to
+correctly handle guest private memory, but that's a task for the future.
+
+Fixes: d0ec49d4de90 ("kvm/x86/svm: Support Secure Memory Encryption within KVM")
+Cc: stable@vger.kernel.org
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210623230552.4027702-5-seanjc@google.com>
+[Use a new header instead of adding header guards to paging_tmpl.h. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c         |    2 ++
+ arch/x86/kvm/mmu/paging.h      |   14 ++++++++++++++
+ arch/x86/kvm/mmu/paging_tmpl.h |    4 ++--
+ arch/x86/kvm/mmu/spte.h        |    6 ------
+ 4 files changed, 18 insertions(+), 8 deletions(-)
+ create mode 100644 arch/x86/kvm/mmu/paging.h
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -52,6 +52,8 @@
+ #include <asm/kvm_page_track.h>
+ #include "trace.h"
++#include "paging.h"
++
+ extern bool itlb_multihit_kvm_mitigation;
+ static int __read_mostly nx_huge_pages = -1;
+--- /dev/null
++++ b/arch/x86/kvm/mmu/paging.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/* Shadow paging constants/helpers that don't need to be #undef'd. */
++#ifndef __KVM_X86_PAGING_H
++#define __KVM_X86_PAGING_H
++
++#define GUEST_PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
++#define PT64_LVL_ADDR_MASK(level) \
++      (GUEST_PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
++                                              * PT64_LEVEL_BITS))) - 1))
++#define PT64_LVL_OFFSET_MASK(level) \
++      (GUEST_PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
++                                              * PT64_LEVEL_BITS))) - 1))
++#endif /* __KVM_X86_PAGING_H */
++
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -24,7 +24,7 @@
+       #define pt_element_t u64
+       #define guest_walker guest_walker64
+       #define FNAME(name) paging##64_##name
+-      #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
++      #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
+       #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
+       #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
+       #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
+@@ -57,7 +57,7 @@
+       #define pt_element_t u64
+       #define guest_walker guest_walkerEPT
+       #define FNAME(name) ept_##name
+-      #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
++      #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
+       #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
+       #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
+       #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
+--- a/arch/x86/kvm/mmu/spte.h
++++ b/arch/x86/kvm/mmu/spte.h
+@@ -23,12 +23,6 @@
+ #else
+ #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+ #endif
+-#define PT64_LVL_ADDR_MASK(level) \
+-      (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
+-                                              * PT64_LEVEL_BITS))) - 1))
+-#define PT64_LVL_OFFSET_MASK(level) \
+-      (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
+-                                              * PT64_LEVEL_BITS))) - 1))
+ #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
+                       | shadow_x_mask | shadow_nx_mask | shadow_me_mask)
diff --git a/queue-5.10/kvm-x86-use-guest-maxphyaddr-from-cpuid.0x8000_0008-iff-tdp-is-enabled.patch b/queue-5.10/kvm-x86-use-guest-maxphyaddr-from-cpuid.0x8000_0008-iff-tdp-is-enabled.patch
new file mode 100644 (file)
index 0000000..2d9b720
--- /dev/null
@@ -0,0 +1,44 @@
+From 4bf48e3c0aafd32b960d341c4925b48f416f14a5 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 23 Jun 2021 16:05:46 -0700
+Subject: KVM: x86: Use guest MAXPHYADDR from CPUID.0x8000_0008 iff TDP is enabled
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 4bf48e3c0aafd32b960d341c4925b48f416f14a5 upstream.
+
+Ignore the guest MAXPHYADDR reported by CPUID.0x8000_0008 if TDP, i.e.
+NPT, is disabled, and instead use the host's MAXPHYADDR.  Per AMD'S APM:
+
+  Maximum guest physical address size in bits. This number applies only
+  to guests using nested paging. When this field is zero, refer to the
+  PhysAddrSize field for the maximum guest physical address size.
+
+Fixes: 24c82e576b78 ("KVM: Sanitize cpuid")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210623230552.4027702-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -827,8 +827,14 @@ static inline int __do_cpuid_func(struct
+               unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+               unsigned phys_as = entry->eax & 0xff;
+-              if (!g_phys_as)
++              /*
+               * Use bare metal's MAXPHYADDR if the CPU doesn't report guest
++               * MAXPHYADDR separately, or if TDP (NPT) is disabled, as the
++               * guest version "applies only to guests using nested paging".
++               */
++              if (!g_phys_as || !tdp_enabled)
+                       g_phys_as = phys_as;
++
+               entry->eax = g_phys_as | (virt_as << 8);
+               entry->edx = 0;
+               cpuid_entry_override(entry, CPUID_8000_0008_EBX);
diff --git a/queue-5.10/series b/queue-5.10/series
new file mode 100644 (file)
index 0000000..9f0649a
--- /dev/null
@@ -0,0 +1,6 @@
+cifs-handle-reconnect-of-tcon-when-there-is-no-cached-dfs-referral.patch
+kvm-mmio-fix-use-after-free-read-in-kvm_vm_ioctl_unregister_coalesced_mmio.patch
+kvm-x86-use-guest-maxphyaddr-from-cpuid.0x8000_0008-iff-tdp-is-enabled.patch
+kvm-x86-mmu-do-not-apply-hpa-memory-encryption-mask-to-gpas.patch
+kvm-nsvm-check-the-value-written-to-msr_vm_hsave_pa.patch
+kvm-x86-disable-hardware-breakpoints-unconditionally-before-kvm_x86-run.patch