git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 23 Jul 2023 20:42:39 +0000 (22:42 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 23 Jul 2023 20:42:39 +0000 (22:42 +0200)
added patches:
ext4-correct-inline-offset-when-handling-xattrs-in-inode-body.patch
kvm-arm64-correctly-handle-page-aging-notifiers-for-unaligned-memslot.patch
kvm-arm64-disable-preemption-in-kvm_arch_hardware_enable.patch
kvm-arm64-timers-use-cnthctl_el2-when-setting-non-cntkctl_el1-bits.patch
kvm-arm64-vgic-v4-make-the-doorbell-request-robust-w.r.t-preemption.patch

queue-6.4/ext4-correct-inline-offset-when-handling-xattrs-in-inode-body.patch [new file with mode: 0644]
queue-6.4/kvm-arm64-correctly-handle-page-aging-notifiers-for-unaligned-memslot.patch [new file with mode: 0644]
queue-6.4/kvm-arm64-disable-preemption-in-kvm_arch_hardware_enable.patch [new file with mode: 0644]
queue-6.4/kvm-arm64-timers-use-cnthctl_el2-when-setting-non-cntkctl_el1-bits.patch [new file with mode: 0644]
queue-6.4/kvm-arm64-vgic-v4-make-the-doorbell-request-robust-w.r.t-preemption.patch [new file with mode: 0644]
queue-6.4/series

diff --git a/queue-6.4/ext4-correct-inline-offset-when-handling-xattrs-in-inode-body.patch b/queue-6.4/ext4-correct-inline-offset-when-handling-xattrs-in-inode-body.patch
new file mode 100644 (file)
index 0000000..da8e336
--- /dev/null
@@ -0,0 +1,54 @@
+From 6909cf5c4101214f4305a62d582a5b93c7e1eb9a Mon Sep 17 00:00:00 2001
+From: Eric Whitney <enwlinux@gmail.com>
+Date: Mon, 22 May 2023 14:15:20 -0400
+Subject: ext4: correct inline offset when handling xattrs in inode body
+
+From: Eric Whitney <enwlinux@gmail.com>
+
+commit 6909cf5c4101214f4305a62d582a5b93c7e1eb9a upstream.
+
+When run on a file system where the inline_data feature has been
+enabled, xfstests generic/269, generic/270, and generic/476 cause ext4
+to emit error messages indicating that inline directory entries are
+corrupted.  This occurs because the inline offset used to locate
+inline directory entries in the inode body is not updated when an
+xattr in that shared region is deleted and the region is shifted in
+memory to recover the space it occupied.  If the deleted xattr precedes
+the system.data attribute, which points to the inline directory entries,
+that attribute will be moved further up in the region.  The inline
+offset continues to point to whatever is located in system.data's former
+location, with unfortunate effects when used to access directory entries
+or (presumably) inline data in the inode body.
+
+Cc: stable@kernel.org
+Signed-off-by: Eric Whitney <enwlinux@gmail.com>
+Link: https://lore.kernel.org/r/20230522181520.1570360-1-enwlinux@gmail.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/xattr.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1782,6 +1782,20 @@ static int ext4_xattr_set_entry(struct e
+               memmove(here, (void *)here + size,
+                       (void *)last - (void *)here + sizeof(__u32));
+               memset(last, 0, size);
++
++              /*
++               * Update i_inline_off - moved ibody region might contain
++               * system.data attribute.  Handling a failure here won't
++               * cause other complications for setting an xattr.
++               */
++              if (!is_block && ext4_has_inline_data(inode)) {
++                      ret = ext4_find_inline_data_nolock(inode);
++                      if (ret) {
++                              ext4_warning_inode(inode,
++                                      "unable to update i_inline_off");
++                              goto out;
++                      }
++              }
+       } else if (s->not_found) {
+               /* Insert new name. */
+               size_t size = EXT4_XATTR_LEN(name_len);
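[Editorial note, not part of the patch above: the failure mode is easy to see outside the kernel. Once the tail of the xattr region is memmove()d down, any previously cached byte offset into that region points at the wrong entry until it is recomputed. The standalone C sketch below simulates that with a toy entry layout; the struct, sizes and names are invented for the demonstration, and only the memmove-then-recompute idea mirrors ext4_xattr_set_entry() and ext4_find_inline_data_nolock().]

#include <stdio.h>
#include <string.h>

struct fake_entry {                     /* toy stand-in, not struct ext4_xattr_entry */
	char name[16];
	char value[16];
};

static int find_entry(const struct fake_entry *region, int count, const char *name)
{
	for (int i = 0; i < count; i++)
		if (!strcmp(region[i].name, name))
			return i * (int)sizeof(struct fake_entry);   /* byte offset */
	return -1;
}

int main(void)
{
	struct fake_entry region[3] = {
		{ "user.foo",    "aaaa" },          /* precedes system.data */
		{ "system.data", "dirent blob" },
		{ "user.bar",    "bbbb" },
	};
	int count = 3;

	/* Cached once, the way i_inline_off caches the system.data location. */
	int inline_off = find_entry(region, count, "system.data");

	/*
	 * Delete "user.foo": shift the rest of the region down, like the
	 * memmove() in ext4_xattr_set_entry().
	 */
	memmove(&region[0], &region[1], 2 * sizeof(struct fake_entry));
	count = 2;

	const struct fake_entry *stale =
		(const struct fake_entry *)((const char *)region + inline_off);
	printf("stale offset %d now names \"%s\", not system.data\n",
	       inline_off, stale->name);

	/* The fix: recompute the offset after the shift, as the patch does
	 * via ext4_find_inline_data_nolock(). */
	inline_off = find_entry(region, count, "system.data");
	printf("recomputed offset %d names \"%s\"\n", inline_off,
	       region[inline_off / (int)sizeof(struct fake_entry)].name);
	return 0;
}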
diff --git a/queue-6.4/kvm-arm64-correctly-handle-page-aging-notifiers-for-unaligned-memslot.patch b/queue-6.4/kvm-arm64-correctly-handle-page-aging-notifiers-for-unaligned-memslot.patch
new file mode 100644 (file)
index 0000000..67ebb0a
--- /dev/null
@@ -0,0 +1,204 @@
+From df6556adf27b7372cfcd97e1c0afb0d516c8279f Mon Sep 17 00:00:00 2001
+From: Oliver Upton <oliver.upton@linux.dev>
+Date: Tue, 27 Jun 2023 23:54:05 +0000
+Subject: KVM: arm64: Correctly handle page aging notifiers for unaligned memslot
+
+From: Oliver Upton <oliver.upton@linux.dev>
+
+commit df6556adf27b7372cfcd97e1c0afb0d516c8279f upstream.
+
+Userspace is allowed to select any PAGE_SIZE aligned hva to back guest
+memory. This is even the case with hugepages, although it is a rather
+suboptimal configuration as PTE level mappings are used at stage-2.
+
+The arm64 page aging handlers have an assumption that the specified
+range is exactly one page/block of memory, which in the aforementioned
+case is not necessarily true. Altogether, this leads to the WARN() in
+kvm_age_gfn() firing.
+
+However, the WARN is only part of the issue as the table walkers visit
+at most a single leaf PTE. For hugepage-backed memory in a memslot that
+isn't hugepage-aligned, page aging entirely misses accesses to the
+hugepage beyond the first page in the memslot.
+
+Add a new walker dedicated to handling page aging MMU notifiers capable
+of walking a range of PTEs. Convert kvm(_test)_age_gfn() over to the new
+walker and drop the WARN that caught the issue in the first place. The
+implementation of this walker was inspired by the test_clear_young()
+implementation by Yu Zhao [*], but repurposed to address a bug in the
+existing aging implementation.
+
+Cc: stable@vger.kernel.org # v5.15
+Fixes: 056aad67f836 ("kvm: arm/arm64: Rework gpa callback handlers")
+Link: https://lore.kernel.org/kvmarm/20230526234435.662652-6-yuzhao@google.com/
+Co-developed-by: Yu Zhao <yuzhao@google.com>
+Signed-off-by: Yu Zhao <yuzhao@google.com>
+Reported-by: Reiji Watanabe <reijiw@google.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
+Link: https://lore.kernel.org/r/20230627235405.4069823-1-oliver.upton@linux.dev
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_pgtable.h |   26 ++++++-------------
+ arch/arm64/kvm/hyp/pgtable.c         |   47 ++++++++++++++++++++++++++++-------
+ arch/arm64/kvm/mmu.c                 |   18 +++++--------
+ 3 files changed, 55 insertions(+), 36 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_pgtable.h
++++ b/arch/arm64/include/asm/kvm_pgtable.h
+@@ -556,22 +556,26 @@ int kvm_pgtable_stage2_wrprotect(struct
+ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
+ /**
+- * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
++ * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
++ *                                       flag in a page-table entry.
+  * @pgt:      Page-table structure initialised by kvm_pgtable_stage2_init*().
+  * @addr:     Intermediate physical address to identify the page-table entry.
++ * @size:     Size of the address range to visit.
++ * @mkold:    True if the access flag should be cleared.
+  *
+  * The offset of @addr within a page is ignored.
+  *
+- * If there is a valid, leaf page-table entry used to translate @addr, then
+- * clear the access flag in that entry.
++ * Tests and conditionally clears the access flag for every valid, leaf
++ * page-table entry used to translate the range [@addr, @addr + @size).
+  *
+  * Note that it is the caller's responsibility to invalidate the TLB after
+  * calling this function to ensure that the updated permissions are visible
+  * to the CPUs.
+  *
+- * Return: The old page-table entry prior to clearing the flag, 0 on failure.
++ * Return: True if any of the visited PTEs had the access flag set.
+  */
+-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
++bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
++                                       u64 size, bool mkold);
+ /**
+  * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
+@@ -594,18 +598,6 @@ int kvm_pgtable_stage2_relax_perms(struc
+                                  enum kvm_pgtable_prot prot);
+ /**
+- * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
+- *                               access flag set.
+- * @pgt:      Page-table structure initialised by kvm_pgtable_stage2_init*().
+- * @addr:     Intermediate physical address to identify the page-table entry.
+- *
+- * The offset of @addr within a page is ignored.
+- *
+- * Return: True if the page-table entry has the access flag set, false otherwise.
+- */
+-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
+-
+-/**
+  * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
+  *                                  of Coherency for guest stage-2 address
+  *                                  range.
+--- a/arch/arm64/kvm/hyp/pgtable.c
++++ b/arch/arm64/kvm/hyp/pgtable.c
+@@ -1173,25 +1173,54 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(str
+       return pte;
+ }
+-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
++struct stage2_age_data {
++      bool    mkold;
++      bool    young;
++};
++
++static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
++                           enum kvm_pgtable_walk_flags visit)
+ {
+-      kvm_pte_t pte = 0;
+-      stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
+-                               &pte, NULL, 0);
++      kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
++      struct stage2_age_data *data = ctx->arg;
++
++      if (!kvm_pte_valid(ctx->old) || new == ctx->old)
++              return 0;
++
++      data->young = true;
++
++      /*
++       * stage2_age_walker() is always called while holding the MMU lock for
++       * write, so this will always succeed. Nonetheless, this deliberately
++       * follows the race detection pattern of the other stage-2 walkers in
++       * case the locking mechanics of the MMU notifiers is ever changed.
++       */
++      if (data->mkold && !stage2_try_set_pte(ctx, new))
++              return -EAGAIN;
++
+       /*
+        * "But where's the TLBI?!", you scream.
+        * "Over in the core code", I sigh.
+        *
+        * See the '->clear_flush_young()' callback on the KVM mmu notifier.
+        */
+-      return pte;
++      return 0;
+ }
+-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
++bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
++                                       u64 size, bool mkold)
+ {
+-      kvm_pte_t pte = 0;
+-      stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
+-      return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
++      struct stage2_age_data data = {
++              .mkold          = mkold,
++      };
++      struct kvm_pgtable_walker walker = {
++              .cb             = stage2_age_walker,
++              .arg            = &data,
++              .flags          = KVM_PGTABLE_WALK_LEAF,
++      };
++
++      WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
++      return data.young;
+ }
+ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1639,27 +1639,25 @@ bool kvm_set_spte_gfn(struct kvm *kvm, s
+ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+ {
+       u64 size = (range->end - range->start) << PAGE_SHIFT;
+-      kvm_pte_t kpte;
+-      pte_t pte;
+       if (!kvm->arch.mmu.pgt)
+               return false;
+-      WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
+-
+-      kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
+-                                      range->start << PAGE_SHIFT);
+-      pte = __pte(kpte);
+-      return pte_valid(pte) && pte_young(pte);
++      return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
++                                                 range->start << PAGE_SHIFT,
++                                                 size, true);
+ }
+ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+ {
++      u64 size = (range->end - range->start) << PAGE_SHIFT;
++
+       if (!kvm->arch.mmu.pgt)
+               return false;
+-      return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
+-                                         range->start << PAGE_SHIFT);
++      return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
++                                                 range->start << PAGE_SHIFT,
++                                                 size, false);
+ }
+ phys_addr_t kvm_mmu_get_httbr(void)
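[Editorial note, not part of the patch above: to make the "aged only one page" failure concrete, here is a small standalone C model, not kernel code. The stage-2 leaf access flags become a plain array, old_mkold() mimics the removed single-entry kvm_pgtable_stage2_mkold()/is_young() behaviour, and test_clear_young() mimics the new range walker. The page count and which page was touched are made up for the demonstration.]

#include <stdbool.h>
#include <stdio.h>

#define NPTES 8                         /* leaf PTEs covered by the notifier range */

static bool af[NPTES];                  /* one access flag per 4KiB leaf PTE */

/* Old behaviour: inspect and clear exactly one leaf entry. */
static bool old_mkold(int idx)
{
	bool young = af[idx];

	af[idx] = false;
	return young;
}

/* New behaviour: visit every leaf entry in [first, first + n). */
static bool test_clear_young(int first, int n, bool mkold)
{
	bool young = false;

	for (int i = first; i < first + n; i++) {
		if (!af[i])
			continue;
		young = true;
		if (mkold)
			af[i] = false;
	}
	return young;
}

int main(void)
{
	/* The guest touched only the third page of an unaligned,
	 * hugepage-backed memslot whose aging range spans all NPTES PTEs. */
	af[2] = true;

	printf("single-entry walker: young=%d (access missed)\n", old_mkold(0));
	printf("range walker:        young=%d\n", test_clear_young(0, NPTES, true));
	return 0;
}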
diff --git a/queue-6.4/kvm-arm64-disable-preemption-in-kvm_arch_hardware_enable.patch b/queue-6.4/kvm-arm64-disable-preemption-in-kvm_arch_hardware_enable.patch
new file mode 100644 (file)
index 0000000..893eef4
--- /dev/null
@@ -0,0 +1,66 @@
+From 970dee09b230895fe2230d2b32ad05a2826818c6 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Mon, 3 Jul 2023 17:35:48 +0100
+Subject: KVM: arm64: Disable preemption in kvm_arch_hardware_enable()
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 970dee09b230895fe2230d2b32ad05a2826818c6 upstream.
+
+Since 0bf50497f03b ("KVM: Drop kvm_count_lock and instead protect
+kvm_usage_count with kvm_lock"), hotplugging back a CPU whilst
+a guest is running results in a number of ugly splats as most
+of this code expects to run with preemption disabled, which isn't
+the case anymore.
+
+While the context is preemptible, it isn't migratable, which should
+be enough. But we have plenty of preemptible() checks all over
+the place, and our per-CPU accessors also disable preemption.
+
+Since this affects released versions, let's do the easy fix first,
+disabling preemption in kvm_arch_hardware_enable(). We can always
+revisit this with a more invasive fix in the future.
+
+Fixes: 0bf50497f03b ("KVM: Drop kvm_count_lock and instead protect kvm_usage_count with kvm_lock")
+Reported-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Tested-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/aeab7562-2d39-e78e-93b1-4711f8cc3fa5@arm.com
+Cc: stable@vger.kernel.org # v6.3, v6.4
+Link: https://lore.kernel.org/r/20230703163548.1498943-1-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/arm.c |   13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1793,8 +1793,17 @@ static void _kvm_arch_hardware_enable(vo
+ int kvm_arch_hardware_enable(void)
+ {
+-      int was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
++      int was_enabled;
++      /*
++       * Most calls to this function are made with migration
++       * disabled, but not with preemption disabled. The former is
++       * enough to ensure correctness, but most of the helpers
++       * expect the latter and will throw a tantrum otherwise.
++       */
++      preempt_disable();
++
++      was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
+       _kvm_arch_hardware_enable(NULL);
+       if (!was_enabled) {
+@@ -1802,6 +1811,8 @@ int kvm_arch_hardware_enable(void)
+               kvm_timer_cpu_up();
+       }
++      preempt_enable();
++
+       return 0;
+ }
diff --git a/queue-6.4/kvm-arm64-timers-use-cnthctl_el2-when-setting-non-cntkctl_el1-bits.patch b/queue-6.4/kvm-arm64-timers-use-cnthctl_el2-when-setting-non-cntkctl_el1-bits.patch
new file mode 100644 (file)
index 0000000..5184db3
--- /dev/null
@@ -0,0 +1,65 @@
+From fe769e6c1f80f542d6f4e7f7c8c6bf20c1307f99 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Tue, 27 Jun 2023 15:05:57 +0100
+Subject: KVM: arm64: timers: Use CNTHCTL_EL2 when setting non-CNTKCTL_EL1 bits
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit fe769e6c1f80f542d6f4e7f7c8c6bf20c1307f99 upstream.
+
+It recently appeared that, when running VHE, there is a notable
+difference between using CNTKCTL_EL1 and CNTHCTL_EL2, despite what
+the architecture documents:
+
+- When accessed from EL2, bits [19:18] and [16:10] of CNTKCTL_EL1 have
+  the same assignment as CNTHCTL_EL2
+- When accessed from EL1, bits [19:18] and [16:10] are RES0
+
+It is all OK, until you factor in NV, where the EL2 guest runs at EL1.
+In this configuration, CNTKCTL_EL1 doesn't trap, nor does it end up in
+the VNCR page. This means that any write from the guest affecting
+CNTHCTL_EL2 using CNTKCTL_EL1 ends up losing some state. Not good.
+
+The fix is obvious: don't use CNTKCTL_EL1 if you want to change bits
+that are not part of the EL1 definition of CNTKCTL_EL1, and use
+CNTHCTL_EL2 instead. This doesn't change anything for a bare-metal OS,
+and fixes it when running under NV. The NV hypervisor will itself
+have to work harder to merge the two accessors.
+
+Note that there is a pending update to the architecture to address
+this issue by making the affected bits UNKNOWN when CNTKCTL_EL1 is
+used from EL2 with VHE enabled.
+
+Fixes: c605ee245097 ("KVM: arm64: timers: Allow physical offset without CNTPOFF_EL2")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org # v6.4
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20230627140557.544885-1-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/arch_timer.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kvm/arch_timer.c
++++ b/arch/arm64/kvm/arch_timer.c
+@@ -827,8 +827,8 @@ static void timer_set_traps(struct kvm_v
+       assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
+       assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);
+-      /* This only happens on VHE, so use the CNTKCTL_EL1 accessor */
+-      sysreg_clear_set(cntkctl_el1, clr, set);
++      /* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
++      sysreg_clear_set(cnthctl_el2, clr, set);
+ }
+ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
+@@ -1559,7 +1559,7 @@ no_vgic:
+ void kvm_timer_init_vhe(void)
+ {
+       if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
+-              sysreg_clear_set(cntkctl_el1, 0, CNTHCTL_ECV);
++              sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
+ }
+ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
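[Editorial note, not part of the patch above: a standalone C model of the state loss described in the commit message, not the kernel's sysreg accessors. One variable stands in for the real CNTKCTL_EL1 an NV guest reaches from EL1, where the commit message says bits [19:18] and [16:10] are RES0, and another for CNTHCTL_EL2. The CNTHCTL_EL1PCEN value and the RES0 mask are assumptions made for the demo.]

#include <stdint.h>
#include <stdio.h>

#define CNTHCTL_EL1PCEN   (1ULL << 1)   /* assumed bit position, shifted by 10 as in the patch */
#define EL1_VIEW_RES0     (~0x3ffULL)   /* model: everything above bit 9 is RES0 at EL1 */

static uint64_t cntkctl_el1;            /* what an NV guest's EL1 access really reaches */
static uint64_t cnthctl_el2;            /* the register the code means to update */

/* Toy read-modify-write; not the kernel's sysreg_clear_set() macro. */
static void fake_clear_set(uint64_t *reg, uint64_t res0, uint64_t clr, uint64_t set)
{
	uint64_t val = (*reg & ~clr) | set;

	*reg = val & ~res0;             /* writes to RES0 bits never stick */
}

int main(void)
{
	uint64_t set = CNTHCTL_EL1PCEN << 10;   /* an EL2-only trap-control bit */

	/* Before the patch: go through the CNTKCTL_EL1 accessor.  Fine on
	 * bare-metal VHE, but from EL1 under NV the bit is RES0 and lost. */
	fake_clear_set(&cntkctl_el1, EL1_VIEW_RES0, 0, set);

	/* After the patch: write CNTHCTL_EL2 directly. */
	fake_clear_set(&cnthctl_el2, 0, 0, set);

	printf("via cntkctl_el1 at EL1: bit %s\n", (cntkctl_el1 & set) ? "kept" : "lost");
	printf("via cnthctl_el2:        bit %s\n", (cnthctl_el2 & set) ? "kept" : "lost");
	return 0;
}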
diff --git a/queue-6.4/kvm-arm64-vgic-v4-make-the-doorbell-request-robust-w.r.t-preemption.patch b/queue-6.4/kvm-arm64-vgic-v4-make-the-doorbell-request-robust-w.r.t-preemption.patch
new file mode 100644 (file)
index 0000000..21aed15
--- /dev/null
@@ -0,0 +1,134 @@
+From b321c31c9b7b309dcde5e8854b741c8e6a9a05f0 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 13 Jul 2023 08:06:57 +0100
+Subject: KVM: arm64: vgic-v4: Make the doorbell request robust w.r.t preemption
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit b321c31c9b7b309dcde5e8854b741c8e6a9a05f0 upstream.
+
+Xiang reports that VMs occasionally fail to boot on GICv4.1 systems when
+running a preemptible kernel, as it is possible that a vCPU is blocked
+without requesting a doorbell interrupt.
+
+The issue is that any preemption that occurs between vgic_v4_put() and
+schedule() on the block path will mark the vPE as nonresident and *not*
+request a doorbell irq. This occurs because when the vcpu thread is
+resumed on its way to block, vcpu_load() will make the vPE resident
+again. Once the vcpu actually blocks, we don't request a doorbell
+anymore, and the vcpu won't be woken up on interrupt delivery.
+
+Fix it by tracking that we're entering WFI, and key the doorbell
+request on that flag. This allows us not to make the vPE resident
+when going through a preempt/schedule cycle, meaning we don't lose
+any state.
+
+Cc: stable@vger.kernel.org
+Fixes: 8e01d9a396e6 ("KVM: arm64: vgic-v4: Move the GICv4 residency flow to be driven by vcpu_load/put")
+Reported-by: Xiang Chen <chenxiang66@hisilicon.com>
+Suggested-by: Zenghui Yu <yuzenghui@huawei.com>
+Tested-by: Xiang Chen <chenxiang66@hisilicon.com>
+Co-developed-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Acked-by: Zenghui Yu <yuzenghui@huawei.com>
+Link: https://lore.kernel.org/r/20230713070657.3873244-1-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h |    2 ++
+ arch/arm64/kvm/arm.c              |    6 ++++--
+ arch/arm64/kvm/vgic/vgic-v3.c     |    2 +-
+ arch/arm64/kvm/vgic/vgic-v4.c     |    7 +++++--
+ include/kvm/arm_vgic.h            |    2 +-
+ 5 files changed, 13 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -701,6 +701,8 @@ struct kvm_vcpu_arch {
+ #define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5))
+ /* PMUSERENR for the guest EL0 is on physical CPU */
+ #define PMUSERENR_ON_CPU      __vcpu_single_flag(sflags, BIT(6))
++/* WFI instruction trapped */
++#define IN_WFI                        __vcpu_single_flag(sflags, BIT(7))
+ /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -704,13 +704,15 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
+        */
+       preempt_disable();
+       kvm_vgic_vmcr_sync(vcpu);
+-      vgic_v4_put(vcpu, true);
++      vcpu_set_flag(vcpu, IN_WFI);
++      vgic_v4_put(vcpu);
+       preempt_enable();
+       kvm_vcpu_halt(vcpu);
+       vcpu_clear_flag(vcpu, IN_WFIT);
+       preempt_disable();
++      vcpu_clear_flag(vcpu, IN_WFI);
+       vgic_v4_load(vcpu);
+       preempt_enable();
+ }
+@@ -778,7 +780,7 @@ static int check_vcpu_requests(struct kv
+               if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
+                       /* The distributor enable bits were changed */
+                       preempt_disable();
+-                      vgic_v4_put(vcpu, false);
++                      vgic_v4_put(vcpu);
+                       vgic_v4_load(vcpu);
+                       preempt_enable();
+               }
+--- a/arch/arm64/kvm/vgic/vgic-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-v3.c
+@@ -749,7 +749,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
+ {
+       struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+-      WARN_ON(vgic_v4_put(vcpu, false));
++      WARN_ON(vgic_v4_put(vcpu));
+       vgic_v3_vmcr_sync(vcpu);
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -336,14 +336,14 @@ void vgic_v4_teardown(struct kvm *kvm)
+       its_vm->vpes = NULL;
+ }
+-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
++int vgic_v4_put(struct kvm_vcpu *vcpu)
+ {
+       struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+       if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
+               return 0;
+-      return its_make_vpe_non_resident(vpe, need_db);
++      return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
+ }
+ int vgic_v4_load(struct kvm_vcpu *vcpu)
+@@ -354,6 +354,9 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
+       if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
+               return 0;
++      if (vcpu_get_flag(vcpu, IN_WFI))
++              return 0;
++
+       /*
+        * Before making the VPE resident, make sure the redistributor
+        * corresponding to our current CPU expects us here. See the
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -431,7 +431,7 @@ int kvm_vgic_v4_unset_forwarding(struct
+ int vgic_v4_load(struct kvm_vcpu *vcpu);
+ void vgic_v4_commit(struct kvm_vcpu *vcpu);
+-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
++int vgic_v4_put(struct kvm_vcpu *vcpu);
+ /* CPU HP callbacks */
+ void kvm_vgic_cpu_up(void);
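[Editorial note, not part of the patch above: the race in the commit message is easiest to follow as a sequence of put/load calls. The standalone C sketch below is a toy model, not the kernel's vgic code. It replays a vCPU entering WFI with one preemption before schedule(), once with the old need_db argument and once keyed on an IN_WFI-style flag, and checks whether the blocked vPE still has a doorbell armed.]

#include <stdbool.h>
#include <stdio.h>

struct vpe  { bool resident, doorbell; };
struct vcpu { struct vpe vpe; bool in_wfi; };

static void v4_put(struct vcpu *v, bool need_db)
{
	v->vpe.resident = false;
	v->vpe.doorbell = need_db;
}

static void v4_load(struct vcpu *v, bool fixed)
{
	if (fixed && v->in_wfi)         /* the patch: stay non-resident while in WFI */
		return;
	v->vpe.resident = true;
	v->vpe.doorbell = false;        /* a resident vPE needs no doorbell */
}

static bool blocks_with_doorbell(bool fixed)
{
	struct vcpu v = { .vpe = { .resident = true } };

	/* kvm_vcpu_wfi(): flag WFI (fix only), make the vPE non-resident. */
	v.in_wfi = fixed;
	v4_put(&v, fixed ? v.in_wfi : true);

	/* Preemption before schedule(): a vcpu_put()/vcpu_load() cycle. */
	v4_put(&v, fixed ? v.in_wfi : false);
	v4_load(&v, fixed);

	/* The thread finally blocks: one last vcpu_put() on the way out. */
	v4_put(&v, fixed ? v.in_wfi : false);

	/* A vLPI arrives while blocked: only an armed doorbell can wake us. */
	return v.vpe.doorbell;
}

int main(void)
{
	printf("old flow:  doorbell armed while blocked? %s\n",
	       blocks_with_doorbell(false) ? "yes" : "no (lost wakeup)");
	printf("with fix:  doorbell armed while blocked? %s\n",
	       blocks_with_doorbell(true) ? "yes" : "no (lost wakeup)");
	return 0;
}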
diff --git a/queue-6.4/series b/queue-6.4/series
index f480c0c67476244ec1dc46cd28705607687a40cf..21e8a73caca0d2bea62ed675800cf3d96ead5480 100644 (file)
@@ -69,3 +69,8 @@ asoc-codecs-wcd938x-fix-missing-mbhc-init-error-handling.patch
 asoc-codecs-wcd934x-fix-resource-leaks-on-component-remove.patch
 asoc-codecs-wcd938x-fix-codec-initialisation-race.patch
 asoc-codecs-wcd938x-fix-soundwire-initialisation-race.patch
+kvm-arm64-timers-use-cnthctl_el2-when-setting-non-cntkctl_el1-bits.patch
+kvm-arm64-correctly-handle-page-aging-notifiers-for-unaligned-memslot.patch
+kvm-arm64-disable-preemption-in-kvm_arch_hardware_enable.patch
+kvm-arm64-vgic-v4-make-the-doorbell-request-robust-w.r.t-preemption.patch
+ext4-correct-inline-offset-when-handling-xattrs-in-inode-body.patch