--- /dev/null
+From 68cf617309b5f6f3a651165f49f20af1494753ae Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Wed, 8 Jul 2020 17:25:46 +0100
+Subject: KVM: arm64: Fix definition of PAGE_HYP_DEVICE
+
+From: Will Deacon <will@kernel.org>
+
+commit 68cf617309b5f6f3a651165f49f20af1494753ae upstream.
+
+PAGE_HYP_DEVICE is intended to encode attribute bits for an EL2 stage-1
+pte mapping a device. Unfortunately, it includes PROT_DEVICE_nGnRE which
+encodes attributes for EL1 stage-1 mappings such as UXN and nG, which are
+RES0 for EL2, and DBM which is meaningless as TCR_EL2.HD is not set.
+
+Fix the definition of PAGE_HYP_DEVICE so that it doesn't set RES0 bits
+at EL2.
+
+Acked-by: Marc Zyngier <maz@kernel.org>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200708162546.26176-1-will@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/pgtable-prot.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -65,7 +65,7 @@
+ #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
+ #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
+ #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
+-#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
++#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
+
+ #define PAGE_S2_MEMATTR(attr) \
+ ({ \
--- /dev/null
+From b9e10d4a6c9f5cbe6369ce2c17ebc67d2e5a4be5 Mon Sep 17 00:00:00 2001
+From: Andrew Scull <ascull@google.com>
+Date: Mon, 6 Jul 2020 10:52:59 +0100
+Subject: KVM: arm64: Stop clobbering x0 for HVC_SOFT_RESTART
+
+From: Andrew Scull <ascull@google.com>
+
+commit b9e10d4a6c9f5cbe6369ce2c17ebc67d2e5a4be5 upstream.
+
+HVC_SOFT_RESTART is given values for x0-2 that it should install
+before exiting to the new address, so it should not set x0 to the stub
+HVC success or failure code.
+
+Fixes: af42f20480bf1 ("arm64: hyp-stub: Zero x0 on successful stub handling")
+Cc: stable@vger.kernel.org
+Signed-off-by: Andrew Scull <ascull@google.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20200706095259.1338221-1-ascull@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/hyp-init.S | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -144,11 +144,15 @@ ENTRY(__kvm_handle_stub_hvc)
+
+ 1: cmp x0, #HVC_RESET_VECTORS
+ b.ne 1f
+-reset:
++
+ /*
+- * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
+- * case we coming via HVC_SOFT_RESTART.
++ * Set the HVC_RESET_VECTORS return code before entering the common
++ * path so that we do not clobber x0-x2 in case we are coming via
++ * HVC_SOFT_RESTART.
+ */
++ mov x0, xzr
++reset:
++ /* Reset kvm back to the hyp stub. */
+ mrs x5, sctlr_el2
+ ldr x6, =SCTLR_ELx_FLAGS
+ bic x5, x5, x6 // Clear SCTL_M and etc
+@@ -159,7 +163,6 @@ reset:
+ /* Install stub vectors */
+ adr_l x5, __hyp_stub_vectors
+ msr vbar_el2, x5
+- mov x0, xzr
+ eret
+
+ 1: /* Bad stub call */
--- /dev/null
+From 5ecad245de2ae23dc4e2dbece92f8ccfbaed2fa7 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 30 Jun 2020 07:07:20 -0400
+Subject: KVM: x86: bit 8 of non-leaf PDPEs is not reserved
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 5ecad245de2ae23dc4e2dbece92f8ccfbaed2fa7 upstream.
+
+Bit 8 would be the "global" bit, which does not quite make sense for non-leaf
+page table entries. Intel ignores it; AMD ignores it in PDEs and PDPEs, but
+reserves it in PML4Es.
+
+Probably, earlier versions of the AMD manual documented it as reserved in PDPEs
+as well, and that behavior made it into KVM as well as kvm-unit-tests; fix it.
+
+Cc: stable@vger.kernel.org
+Reported-by: Nadav Amit <namit@vmware.com>
+Fixes: a0c0feb57992 ("KVM: x86: reserve bit 8 of non-leaf PDPEs and PML4Es in 64-bit mode on AMD", 2014-09-03)
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4474,7 +4474,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu
+ nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
+ rsvd_bits(maxphyaddr, 51);
+ rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
+- nonleaf_bit8_rsvd | gbpages_bit_rsvd |
++ gbpages_bit_rsvd |
+ rsvd_bits(maxphyaddr, 51);
+ rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+ rsvd_bits(maxphyaddr, 51);
--- /dev/null
+From d74fcfc1f0ff4b6c26ecef1f9e48d8089ab4eaac Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Thu, 2 Jul 2020 19:17:14 -0700
+Subject: KVM: x86: Inject #GP if guest attempts to toggle CR4.LA57 in 64-bit mode
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit d74fcfc1f0ff4b6c26ecef1f9e48d8089ab4eaac upstream.
+
+Inject a #GP on MOV CR4 if CR4.LA57 is toggled in 64-bit mode, which is
+illegal per Intel's SDM:
+
+ CR4.LA57
+ 57-bit linear addresses (bit 12 of CR4) ... blah blah blah ...
+ This bit cannot be modified in IA-32e mode.
+
+Note, the pseudocode for MOV CR doesn't call out the fault condition,
+which is likely why the check was missed during initial development.
+This is arguably an SDM bug and will hopefully be fixed in future
+release of the SDM.
+
+Fixes: fd8cb433734ee ("KVM: MMU: Expose the LA57 feature to VM.")
+Cc: stable@vger.kernel.org
+Reported-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Message-Id: <20200703021714.5549-1-sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -865,6 +865,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, u
+ if (is_long_mode(vcpu)) {
+ if (!(cr4 & X86_CR4_PAE))
+ return 1;
++ if ((cr4 ^ old_cr4) & X86_CR4_LA57)
++ return 1;
+ } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
+ && ((cr4 ^ old_cr4) & pdptr_bits)
+ && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
--- /dev/null
+From 7c83d096aed055a7763a03384f92115363448b71 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Thu, 2 Jul 2020 21:04:21 -0700
+Subject: KVM: x86: Mark CR4.TSD as being possibly owned by the guest
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 7c83d096aed055a7763a03384f92115363448b71 upstream.
+
+Mark CR4.TSD as being possibly owned by the guest as that is indeed the
+case on VMX. Without TSD being tagged as possibly owned by the guest, a
+targeted read of CR4 to get TSD could observe a stale value. This bug
+is benign in the current code base as the sole consumer of TSD is the
+emulator (for RDTSC) and the emulator always "reads" the entirety of CR4
+when grabbing bits.
+
+Add a build-time assertion in to ensure VMX doesn't hand over more CR4
+bits without also updating x86.
+
+Fixes: 52ce3c21aec3 ("x86,kvm,vmx: Don't trap writes to CR4.TSD")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Message-Id: <20200703040422.31536-2-sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/kvm_cache_regs.h | 2 +-
+ arch/x86/kvm/vmx.c | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/kvm_cache_regs.h
++++ b/arch/x86/kvm/kvm_cache_regs.h
+@@ -5,7 +5,7 @@
+ #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
+ #define KVM_POSSIBLE_CR4_GUEST_BITS \
+ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
+- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
++ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
+
+ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6335,6 +6335,8 @@ static void vmx_set_constant_host_state(
+
+ static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
+ {
++ BUILD_BUG_ON(KVM_CR4_GUEST_OWNED_BITS & ~KVM_POSSIBLE_CR4_GUEST_BITS);
++
+ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+ if (enable_ept)
+ vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
alsa-opl3-fix-infoleak-in-opl3.patch
alsa-hda-let-hs_mic-be-picked-ahead-of-hp_mic.patch
alsa-usb-audio-add-quirk-for-macrosilicon-ms2109.patch
+kvm-arm64-fix-definition-of-page_hyp_device.patch
+kvm-arm64-stop-clobbering-x0-for-hvc_soft_restart.patch
+kvm-x86-bit-8-of-non-leaf-pdpes-is-not-reserved.patch
+kvm-x86-inject-gp-if-guest-attempts-to-toggle-cr4.la57-in-64-bit-mode.patch
+kvm-x86-mark-cr4.tsd-as-being-possibly-owned-by-the-guest.patch