--- /dev/null
+From ff1712f953e27f0b0718762ec17d0adb15c9fd0b Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Fri, 20 Nov 2020 13:57:48 +0000
+Subject: arm64: pgtable: Ensure dirty bit is preserved across pte_wrprotect()
+
+From: Will Deacon <will@kernel.org>
+
+commit ff1712f953e27f0b0718762ec17d0adb15c9fd0b upstream.
+
+With hardware dirty bit management, calling pte_wrprotect() on a writable,
+dirty PTE will lose the dirty state and return a read-only, clean entry.
+
+Move the logic from ptep_set_wrprotect() into pte_wrprotect() to ensure that
+the dirty bit is preserved for writable entries, as this is required for
+soft-dirty bit management if we enable it in the future.
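+
+For illustration, a minimal standalone sketch of the resulting semantics
+(the bit positions and helpers below are illustrative stand-ins, not the
+real arm64 definitions):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define PTE_RDONLY (1ULL << 7)   /* illustrative bit positions */
+  #define PTE_WRITE  (1ULL << 51)  /* DBM: hardware clears RDONLY */
+  #define PTE_DIRTY  (1ULL << 55)  /* software dirty bit */
+
+  typedef uint64_t pte_t;
+
+  /* hardware-dirty: writable (DBM set) with RDONLY already cleared */
+  static int pte_hw_dirty(pte_t pte)
+  {
+          return (pte & PTE_WRITE) && !(pte & PTE_RDONLY);
+  }
+
+  static pte_t pte_wrprotect(pte_t pte)
+  {
+          /* preserve dirtiness before making the entry read-only */
+          if (pte_hw_dirty(pte))
+                  pte |= PTE_DIRTY;
+          pte &= ~PTE_WRITE;
+          pte |= PTE_RDONLY;
+          return pte;
+  }
+
+  int main(void)
+  {
+          pte_t pte = PTE_WRITE;  /* writable => hardware-dirty */
+
+          pte = pte_wrprotect(pte);
+          printf("dirty preserved: %d\n", !!(pte & PTE_DIRTY)); /* 1 */
+          return 0;
+  }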
+
+Cc: <stable@vger.kernel.org>
+Fixes: 2f4b829c625e ("arm64: Add support for hardware updates of the access and dirty pte bits")
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Link: https://lore.kernel.org/r/20201120143557.6715-3-will@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/pgtable.h | 27 ++++++++++++++-------------
+ 1 file changed, 14 insertions(+), 13 deletions(-)
+
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -145,13 +145,6 @@ static inline pte_t set_pte_bit(pte_t pt
+ return pte;
+ }
+
+-static inline pte_t pte_wrprotect(pte_t pte)
+-{
+- pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
+- pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+- return pte;
+-}
+-
+ static inline pte_t pte_mkwrite(pte_t pte)
+ {
+ pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
+@@ -177,6 +170,20 @@ static inline pte_t pte_mkdirty(pte_t pt
+ return pte;
+ }
+
++static inline pte_t pte_wrprotect(pte_t pte)
++{
++ /*
++ * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
++ * clear), set the PTE_DIRTY bit.
++ */
++ if (pte_hw_dirty(pte))
++ pte = pte_mkdirty(pte);
++
++ pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
++ pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
++ return pte;
++}
++
+ static inline pte_t pte_mkold(pte_t pte)
+ {
+ return clear_pte_bit(pte, __pgprot(PTE_AF));
+@@ -669,12 +676,6 @@ static inline void ptep_set_wrprotect(st
+ pte = READ_ONCE(*ptep);
+ do {
+ old_pte = pte;
+- /*
+- * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+- * clear), set the PTE_DIRTY bit.
+- */
+- if (pte_hw_dirty(pte))
+- pte = pte_mkdirty(pte);
+ pte = pte_wrprotect(pte);
+ pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(old_pte), pte_val(pte));
--- /dev/null
+From 07509e10dcc77627f8b6a57381e878fe269958d3 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Fri, 20 Nov 2020 13:28:01 +0000
+Subject: arm64: pgtable: Fix pte_accessible()
+
+From: Will Deacon <will@kernel.org>
+
+commit 07509e10dcc77627f8b6a57381e878fe269958d3 upstream.
+
+pte_accessible() is used by ptep_clear_flush() to figure out whether TLB
+invalidation is necessary when unmapping pages for reclaim. Although our
+implementation is correct according to the architecture (it returns true
+only for valid, young ptes in the absence of racing page-table
+modifications), it is in fact flawed: ptep_clear_flush_young() invalidates
+old ptes lazily, eliding the expensive DSB instruction that would otherwise
+complete the TLB invalidation.
+
+Rather than penalise the aging path, adjust pte_accessible() to return
+true for any valid pte, even if the access flag is cleared.
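+
+A minimal before/after sketch of the check (illustrative bit positions;
+pte_present() is approximated by PTE_VALID here, ignoring PROT_NONE):
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define PTE_VALID (1ULL << 0)   /* illustrative bit positions */
+  #define PTE_AF    (1ULL << 10)  /* access flag */
+
+  typedef uint64_t pte_t;
+
+  /* old: a valid-but-old pte was reported as not accessible, although
+   * a lazy (DSB-less) ptep_clear_flush_young() may have left its TLB
+   * entry live */
+  static bool pte_accessible_old(bool flush_pending, pte_t pte)
+  {
+          if (flush_pending)
+                  return pte & PTE_VALID;
+          return (pte & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF);
+  }
+
+  /* new: any valid pte may still be cached in the TLB */
+  static bool pte_accessible_new(bool flush_pending, pte_t pte)
+  {
+          (void)flush_pending;
+          return pte & PTE_VALID;
+  }
+
+  int main(void)
+  {
+          pte_t pte = PTE_VALID;  /* valid, access flag cleared */
+
+          printf("old: %d, new: %d\n",
+                 pte_accessible_old(false, pte),
+                 pte_accessible_new(false, pte));  /* old: 0, new: 1 */
+          return 0;
+  }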
+
+Cc: <stable@vger.kernel.org>
+Fixes: 76c714be0e5e ("arm64: pgtable: implement pte_accessible()")
+Reported-by: Yu Zhao <yuzhao@google.com>
+Acked-by: Yu Zhao <yuzhao@google.com>
+Reviewed-by: Minchan Kim <minchan@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Link: https://lore.kernel.org/r/20201120143557.6715-2-will@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/pgtable.h | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -107,8 +107,6 @@ extern unsigned long empty_zero_page[PAG
+ #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+ #define pte_valid_not_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+-#define pte_valid_young(pte) \
+- ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+ #define pte_valid_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+
+@@ -116,9 +114,12 @@ extern unsigned long empty_zero_page[PAG
+ * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
+ * so that we don't erroneously return false for pages that have been
+ * remapped as PROT_NONE but are yet to be flushed from the TLB.
++ * Note that we can't make any assumptions based on the state of the access
++ * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
++ * TLB.
+ */
+ #define pte_accessible(mm, pte) \
+- (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
++ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
+
+ /*
+ * p??_access_permitted() is true for valid user mappings (subject to the
--- /dev/null
+From 23bde34771f1ea92fb5e6682c0d8c04304d34b3b Mon Sep 17 00:00:00 2001
+From: Zenghui Yu <yuzenghui@huawei.com>
+Date: Tue, 17 Nov 2020 23:16:29 +0800
+Subject: KVM: arm64: vgic-v3: Drop the reporting of GICR_TYPER.Last for userspace
+
+From: Zenghui Yu <yuzenghui@huawei.com>
+
+commit 23bde34771f1ea92fb5e6682c0d8c04304d34b3b upstream.
+
+It was recently reported that if GICR_TYPER is accessed before the RD base
+address is set, we dereference the as-yet-unset @rdreg pointer. Oops...
+
+ gpa_t last_rdist_typer = rdreg->base + GICR_TYPER +
+ (rdreg->free_index - 1) * KVM_VGIC_V3_REDIST_SIZE;
+
+It's "expected" that users will access registers in the redistributor if
+the RD has been properly configured (e.g., the RD base address is set). But
+it hasn't yet been covered by the existing documentation.
+
+Per discussion on the list [1], the reporting of the GICR_TYPER.Last bit
+for userspace never actually worked, and it's difficult for us to emulate
+it correctly given that userspace is free to access it at any time. Let's
+just drop the reporting of the Last bit for userspace for now (userspace
+should have full knowledge about it anyway); at the very least this stops
+the kernel from panicking ;-)
+
+[1] https://lore.kernel.org/kvmarm/c20865a267e44d1e2c0d52ce4e012263@kernel.org/
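+
+For reference, a standalone sketch of the value now returned to userspace
+(field layout per the GICv3 spec; the helper name is illustrative):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define GICR_TYPER_PLPIS (1ULL << 0)  /* physical LPIs supported */
+
+  /* Affinity in bits [63:32], Processor_Number in [23:8];
+   * Last (bit 4) is deliberately left as zero for userspace. */
+  static uint64_t typer_for_userspace(uint64_t mpidr_aff,
+                                      uint32_t vcpu_id, int has_its)
+  {
+          uint64_t value = (mpidr_aff & 0xffffffULL) << 32;
+
+          value |= (uint64_t)(vcpu_id & 0xffff) << 8;
+          if (has_its)
+                  value |= GICR_TYPER_PLPIS;
+          return value;
+  }
+
+  int main(void)
+  {
+          printf("GICR_TYPER = %#llx\n", (unsigned long long)
+                 typer_for_userspace(0x010203, 5, 1));
+          return 0;
+  }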
+
+Fixes: ba7b3f1275fd ("KVM: arm/arm64: Revisit Redistributor TYPER last bit computation")
+Reported-by: Keqian Zhu <zhukeqian1@huawei.com>
+Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20201117151629.1738-1-yuzenghui@huawei.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic-mmio-v3.c | 22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
++++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
+@@ -226,6 +226,23 @@ static unsigned long vgic_mmio_read_v3r_
+ return extract_bytes(value, addr & 7, len);
+ }
+
++static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
++ gpa_t addr, unsigned int len)
++{
++ unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
++ int target_vcpu_id = vcpu->vcpu_id;
++ u64 value;
++
++ value = (u64)(mpidr & GENMASK(23, 0)) << 32;
++ value |= ((target_vcpu_id & 0xffff) << 8);
++
++ if (vgic_has_its(vcpu->kvm))
++ value |= GICR_TYPER_PLPIS;
++
++ /* reporting of the Last bit is not supported for userspace */
++ return extract_bytes(value, addr & 7, len);
++}
++
+ static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+ {
+@@ -532,8 +549,9 @@ static const struct vgic_register_region
+ REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
+ vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
+ VGIC_ACCESS_32bit),
+- REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
+- vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
++ REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
++ vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
++ vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
+ vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
--- /dev/null
+From 71cc849b7093bb83af966c0e60cb11b7f35cd746 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 27 Nov 2020 09:18:20 +0100
+Subject: KVM: x86: Fix split-irqchip vs interrupt injection window request
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 71cc849b7093bb83af966c0e60cb11b7f35cd746 upstream.
+
+kvm_cpu_accept_dm_intr and kvm_vcpu_ready_for_interrupt_injection are
+a hodge-podge of conditions, hacked together to get something that
+more or less works. But what is actually needed is much simpler;
+in both cases the fundamental question is, do we have a place to stash
+an interrupt if userspace does KVM_INTERRUPT?
+
+In userspace irqchip mode, that is !vcpu->arch.interrupt.injected.
+Currently kvm_event_needs_reinjection(vcpu) covers it, but it is
+unnecessarily restrictive.
+
+In split irqchip mode it's a bit more complicated: we need to check
+kvm_apic_accept_pic_intr(vcpu) (the IRQ window exit is basically an INTACK
+cycle and thus requires ExtINTs not to be masked) as well as
+!pending_userspace_extint(vcpu). However, there is no need to
+check kvm_event_needs_reinjection(vcpu), since split irqchip keeps
+pending ExtINT state separate from event injection state, and checking
+kvm_cpu_has_interrupt(vcpu) is wrong too since ExtINT has higher
+priority than APIC interrupts. In fact the latter fixes a bug:
+when userspace requests an IRQ window vmexit, an interrupt in the
+local APIC can cause kvm_cpu_has_interrupt() to be true and thus
+kvm_vcpu_ready_for_interrupt_injection() to return false. When this
+happens, vcpu_run does not exit to userspace but the interrupt window
+vmexits keep occurring. The VM loops without any hope of making progress.
+
+Once we try to fix these with something like
+
+ return kvm_arch_interrupt_allowed(vcpu) &&
+- !kvm_cpu_has_interrupt(vcpu) &&
+- !kvm_event_needs_reinjection(vcpu) &&
+- kvm_cpu_accept_dm_intr(vcpu);
++ (!lapic_in_kernel(vcpu)
++ ? !vcpu->arch.interrupt.injected
++ : (kvm_apic_accept_pic_intr(vcpu)
++ && !pending_userspace_extint(vcpu)));
+
+we realize two things. First, thanks to the previous patch the complex
+conditional can reuse !kvm_cpu_has_extint(vcpu). Second, the interrupt
+window request in vcpu_enter_guest()
+
+ bool req_int_win =
+ dm_request_for_irq_injection(vcpu) &&
+ kvm_cpu_accept_dm_intr(vcpu);
+
+should be kept in sync with kvm_vcpu_ready_for_interrupt_injection():
+it is unnecessary to ask the processor for an interrupt window
+if we would not be able to return to userspace. Therefore,
+kvm_cpu_accept_dm_intr(vcpu) is basically !kvm_cpu_has_extint(vcpu)
+ANDed with the existing check for masked ExtINT. It all makes sense:
+
+- we can accept an interrupt from userspace if there is a place
+ to stash it (and, for irqchip split, ExtINTs are not masked).
+ Interrupts from userspace _can_ be accepted even if right now
+ EFLAGS.IF=0.
+
+- in order to tell userspace we will inject its interrupt ("IRQ
+ window open" i.e. kvm_vcpu_ready_for_interrupt_injection), both
+ KVM and the vCPU need to be ready to accept the interrupt.
+
+... and this is what the patch implements.
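+
+A compact sketch of the resulting logic (the struct below is an
+illustrative stand-in for the real vCPU state, not KVM's types):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct vcpu_state {
+          bool lapic_in_kernel;     /* LAPIC emulated by KVM? */
+          bool interrupt_injected;  /* userspace-APIC intr stashed */
+          bool extint_pending;      /* pending ExtINT (split irqchip) */
+          bool accepts_pic_intr;    /* LINT0 unmasked for ExtINT */
+          bool interrupts_allowed;  /* EFLAGS.IF etc. */
+  };
+
+  static bool kvm_cpu_has_extint(const struct vcpu_state *v)
+  {
+          if (!v->lapic_in_kernel)
+                  return v->interrupt_injected;
+          return v->accepts_pic_intr && v->extint_pending;
+  }
+
+  static bool kvm_cpu_accept_dm_intr(const struct vcpu_state *v)
+  {
+          /* we need a free slot to stash the interrupt number */
+          if (kvm_cpu_has_extint(v))
+                  return false;
+          /* the IRQ window exit is an INTACK cycle, so with an
+           * in-kernel LAPIC, LINT0 must not be masked */
+          return !v->lapic_in_kernel || v->accepts_pic_intr;
+  }
+
+  static bool kvm_vcpu_ready_for_interrupt_injection(
+                  const struct vcpu_state *v)
+  {
+          return v->interrupts_allowed && kvm_cpu_accept_dm_intr(v);
+  }
+
+  int main(void)
+  {
+          /* pending LAPIC interrupts no longer block the exit to
+           * userspace: this is the case that used to livelock */
+          struct vcpu_state v = { true, false, false, true, true };
+
+          printf("ready: %d\n",
+                 kvm_vcpu_ready_for_interrupt_injection(&v));  /* 1 */
+          return 0;
+  }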
+
+Reported-by: David Woodhouse <dwmw@amazon.co.uk>
+Analyzed-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Nikos Tsironis <ntsironis@arrikto.com>
+Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
+Tested-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/irq.c | 2 +-
+ arch/x86/kvm/x86.c | 18 ++++++++++--------
+ 3 files changed, 12 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1472,6 +1472,7 @@ int kvm_test_age_hva(struct kvm *kvm, un
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
+ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
++int kvm_cpu_has_extint(struct kvm_vcpu *v);
+ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
+--- a/arch/x86/kvm/irq.c
++++ b/arch/x86/kvm/irq.c
+@@ -52,7 +52,7 @@ static int pending_userspace_extint(stru
+ * check if there is pending interrupt from
+ * non-APIC source without intack.
+ */
+-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
++int kvm_cpu_has_extint(struct kvm_vcpu *v)
+ {
+ /*
+ * FIXME: interrupt.injected represents an interrupt whose
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3351,21 +3351,23 @@ static int kvm_vcpu_ioctl_set_lapic(stru
+
+ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
+ {
++ /*
++ * We can accept userspace's request for interrupt injection
++ * as long as we have a place to store the interrupt number.
++ * The actual injection will happen when the CPU is able to
++ * deliver the interrupt.
++ */
++ if (kvm_cpu_has_extint(vcpu))
++ return false;
++
++ /* Acknowledging ExtINT does not happen if LINT0 is masked. */
+ return (!lapic_in_kernel(vcpu) ||
+ kvm_apic_accept_pic_intr(vcpu));
+ }
+
+-/*
+- * if userspace requested an interrupt window, check that the
+- * interrupt window is open.
+- *
+- * No need to exit to userspace if we already have an interrupt queued.
+- */
+ static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
+ {
+ return kvm_arch_interrupt_allowed(vcpu) &&
+- !kvm_cpu_has_interrupt(vcpu) &&
+- !kvm_event_needs_reinjection(vcpu) &&
+ kvm_cpu_accept_dm_intr(vcpu);
+ }
+
--- /dev/null
+From 72c3bcdcda494cbd600712a32e67702cdee60c07 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 27 Nov 2020 08:53:52 +0100
+Subject: KVM: x86: handle !lapic_in_kernel case in kvm_cpu_*_extint
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 72c3bcdcda494cbd600712a32e67702cdee60c07 upstream.
+
+Centralize handling of interrupts from the userspace APIC
+in kvm_cpu_has_extint and kvm_cpu_get_extint, since
+userspace APIC interrupts are handled more or less the
+same as ExtINTs are with split irqchip. This removes
+duplicated code from kvm_cpu_has_injectable_intr and
+kvm_cpu_has_interrupt, and makes the code more similar
+between kvm_cpu_has_{extint,interrupt} on one side
+and kvm_cpu_get_{extint,interrupt} on the other.
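+
+A simplified sketch of the centralized flow (illustrative types; the
+in-kernel-PIC leg is elided):
+
+  #include <stdio.h>
+
+  struct vcpu {
+          int lapic_in_kernel;         /* LAPIC emulated by KVM? */
+          int irqchip_split;           /* PIC/IOAPIC in userspace? */
+          int interrupt_injected;      /* userspace-APIC intr stashed */
+          int interrupt_nr;            /* ...and its vector */
+          int pending_external_vector; /* split-irqchip ExtINT, or -1 */
+  };
+
+  static int kvm_cpu_has_extint(struct vcpu *v)
+  {
+          if (!v->lapic_in_kernel)
+                  return v->interrupt_injected;
+          return v->pending_external_vector != -1;
+  }
+
+  /* userspace APIC, split irqchip and (in KVM) the in-kernel PIC
+   * are all decided in this one helper now */
+  static int kvm_cpu_get_extint(struct vcpu *v)
+  {
+          if (!kvm_cpu_has_extint(v))
+                  return -1;
+
+          if (!v->lapic_in_kernel)
+                  return v->interrupt_nr;  /* userspace APIC */
+
+          if (v->irqchip_split) {
+                  int vector = v->pending_external_vector;
+
+                  v->pending_external_vector = -1;
+                  return vector;           /* stashed by userspace */
+          }
+          return -1;  /* in-kernel PIC leg elided in this sketch */
+  }
+
+  int main(void)
+  {
+          struct vcpu v = { 1, 1, 0, 0, 0x30 };
+
+          printf("vector: %#x\n", kvm_cpu_get_extint(&v));  /* 0x30 */
+          return 0;
+  }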
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Filippo Sironi <sironi@amazon.de>
+Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
+Tested-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/irq.c | 83 ++++++++++++++++++++-------------------------------
+ arch/x86/kvm/lapic.c | 2 -
+ 2 files changed, 34 insertions(+), 51 deletions(-)
+
+--- a/arch/x86/kvm/irq.c
++++ b/arch/x86/kvm/irq.c
+@@ -54,27 +54,8 @@ static int pending_userspace_extint(stru
+ */
+ static int kvm_cpu_has_extint(struct kvm_vcpu *v)
+ {
+- u8 accept = kvm_apic_accept_pic_intr(v);
+-
+- if (accept) {
+- if (irqchip_split(v->kvm))
+- return pending_userspace_extint(v);
+- else
+- return v->kvm->arch.vpic->output;
+- } else
+- return 0;
+-}
+-
+-/*
+- * check if there is injectable interrupt:
+- * when virtual interrupt delivery enabled,
+- * interrupt from apic will handled by hardware,
+- * we don't need to check it here.
+- */
+-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+-{
+ /*
+- * FIXME: interrupt.injected represents an interrupt that it's
++ * FIXME: interrupt.injected represents an interrupt whose
+ * side-effects have already been applied (e.g. bit from IRR
+ * already moved to ISR). Therefore, it is incorrect to rely
+ * on interrupt.injected to know if there is a pending
+@@ -87,6 +68,23 @@ int kvm_cpu_has_injectable_intr(struct k
+ if (!lapic_in_kernel(v))
+ return v->arch.interrupt.injected;
+
++ if (!kvm_apic_accept_pic_intr(v))
++ return 0;
++
++ if (irqchip_split(v->kvm))
++ return pending_userspace_extint(v);
++ else
++ return v->kvm->arch.vpic->output;
++}
++
++/*
++ * check if there is injectable interrupt:
++ * when virtual interrupt delivery enabled,
++ * interrupt from apic will handled by hardware,
++ * we don't need to check it here.
++ */
++int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
++{
+ if (kvm_cpu_has_extint(v))
+ return 1;
+
+@@ -102,20 +100,6 @@ int kvm_cpu_has_injectable_intr(struct k
+ */
+ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
+ {
+- /*
+- * FIXME: interrupt.injected represents an interrupt that it's
+- * side-effects have already been applied (e.g. bit from IRR
+- * already moved to ISR). Therefore, it is incorrect to rely
+- * on interrupt.injected to know if there is a pending
+- * interrupt in the user-mode LAPIC.
+- * This leads to nVMX/nSVM not be able to distinguish
+- * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
+- * pending interrupt or should re-inject an injected
+- * interrupt.
+- */
+- if (!lapic_in_kernel(v))
+- return v->arch.interrupt.injected;
+-
+ if (kvm_cpu_has_extint(v))
+ return 1;
+
+@@ -129,16 +113,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt)
+ */
+ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+ {
+- if (kvm_cpu_has_extint(v)) {
+- if (irqchip_split(v->kvm)) {
+- int vector = v->arch.pending_external_vector;
+-
+- v->arch.pending_external_vector = -1;
+- return vector;
+- } else
+- return kvm_pic_read_irq(v->kvm); /* PIC */
+- } else
++ if (!kvm_cpu_has_extint(v)) {
++ WARN_ON(!lapic_in_kernel(v));
+ return -1;
++ }
++
++ if (!lapic_in_kernel(v))
++ return v->arch.interrupt.nr;
++
++ if (irqchip_split(v->kvm)) {
++ int vector = v->arch.pending_external_vector;
++
++ v->arch.pending_external_vector = -1;
++ return vector;
++ } else
++ return kvm_pic_read_irq(v->kvm); /* PIC */
+ }
+
+ /*
+@@ -146,13 +135,7 @@ static int kvm_cpu_get_extint(struct kvm
+ */
+ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
+ {
+- int vector;
+-
+- if (!lapic_in_kernel(v))
+- return v->arch.interrupt.nr;
+-
+- vector = kvm_cpu_get_extint(v);
+-
++ int vector = kvm_cpu_get_extint(v);
+ if (vector != -1)
+ return vector; /* PIC */
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2284,7 +2284,7 @@ int kvm_apic_has_interrupt(struct kvm_vc
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ u32 ppr;
+
+- if (!kvm_apic_hw_enabled(apic))
++ if (!kvm_apic_present(vcpu))
+ return -1;
+
+ __apic_update_ppr(apic, &ppr);
btrfs-don-t-access-possibly-stale-fs_info-data-for-printing-duplicate-device.patch
btrfs-fix-lockdep-splat-when-reading-qgroup-config-on-mount.patch
wireless-use-linux-stddef.h-instead-of-stddef.h.patch
+kvm-arm64-vgic-v3-drop-the-reporting-of-gicr_typer.last-for-userspace.patch
+kvm-x86-handle-lapic_in_kernel-case-in-kvm_cpu_-_extint.patch
+kvm-x86-fix-split-irqchip-vs-interrupt-injection-window-request.patch
+arm64-pgtable-fix-pte_accessible.patch
+arm64-pgtable-ensure-dirty-bit-is-preserved-across-pte_wrprotect.patch