git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Apr 2023 08:57:17 +0000 (10:57 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Apr 2023 08:57:17 +0000 (10:57 +0200)
added patches:
kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch
kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch

queue-5.15/kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch [new file with mode: 0644]
queue-5.15/kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch [new file with mode: 0644]
queue-5.15/series

diff --git a/queue-5.15/kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch b/queue-5.15/kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch
new file mode 100644 (file)
index 0000000..686d145
--- /dev/null
@@ -0,0 +1,119 @@
+From e86fc1a3a3e9b4850fe74d738e3cfcf4297d8bba Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 16 Mar 2023 17:45:45 +0000
+Subject: KVM: arm64: Disable interrupts while walking userspace PTs
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit e86fc1a3a3e9b4850fe74d738e3cfcf4297d8bba upstream.
+
+We walk the userspace PTs to discover what mapping size was
+used there. However, this can race against the userspace tables
+being freed, and we end up in the weeds.
+
+Thankfully, the mm code is being generous and will IPI us when
+doing so. So let's implement our part of the bargain and disable
+interrupts around the walk. This ensures that nothing terrible
+happens during that time.
+
+We still need to handle the removal of the page tables before
+the walk. For that, allow get_user_mapping_size() to return an
+error, and make sure this error can be propagated all the way
+to the exit handler.
+
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230316174546.3777507-2-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/mmu.c |   45 ++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 38 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -468,14 +468,33 @@ static int get_user_mapping_size(struct
+                                  CONFIG_PGTABLE_LEVELS),
+               .mm_ops         = &kvm_user_mm_ops,
+       };
++      unsigned long flags;
+       kvm_pte_t pte = 0;      /* Keep GCC quiet... */
+       u32 level = ~0;
+       int ret;
++      /*
++       * Disable IRQs so that we hazard against a concurrent
++       * teardown of the userspace page tables (which relies on
++       * IPI-ing threads).
++       */
++      local_irq_save(flags);
+       ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
+-      VM_BUG_ON(ret);
+-      VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
+-      VM_BUG_ON(!(pte & PTE_VALID));
++      local_irq_restore(flags);
++
++      if (ret)
++              return ret;
++
++      /*
++       * Not seeing an error, but not updating level? Something went
++       * deeply wrong...
++       */
++      if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
++              return -EFAULT;
++
++      /* Oops, the userspace PTs are gone... Replay the fault */
++      if (!kvm_pte_valid(pte))
++              return -EAGAIN;
+       return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
+ }
+@@ -826,7 +845,7 @@ static bool fault_supports_stage2_huge_m
+  *
+  * Returns the size of the mapping.
+  */
+-static unsigned long
++static long
+ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                           unsigned long hva, kvm_pfn_t *pfnp,
+                           phys_addr_t *ipap)
+@@ -838,8 +857,15 @@ transparent_hugepage_adjust(struct kvm *
+        * sure that the HVA and IPA are sufficiently aligned and that the
+        * block map is contained within the memslot.
+        */
+-      if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+-          get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
++      if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
++              int sz = get_user_mapping_size(kvm, hva);
++
++              if (sz < 0)
++                      return sz;
++
++              if (sz < PMD_SIZE)
++                      return PAGE_SIZE;
++
+               /*
+                * The address we faulted on is backed by a transparent huge
+                * page.  However, because we map the compound huge page and
+@@ -957,7 +983,7 @@ static int user_mem_abort(struct kvm_vcp
+       kvm_pfn_t pfn;
+       bool logging_active = memslot_is_logging(memslot);
+       unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
+-      unsigned long vma_pagesize, fault_granule;
++      long vma_pagesize, fault_granule;
+       enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
+       struct kvm_pgtable *pgt;
+@@ -1104,6 +1130,11 @@ static int user_mem_abort(struct kvm_vcp
+                       vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
+                                                                  hva, &pfn,
+                                                                  &fault_ipa);
++
++              if (vma_pagesize < 0) {
++                      ret = vma_pagesize;
++                      goto out_unlock;
++              }
+       }
+       if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
diff --git a/queue-5.15/kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch b/queue-5.15/kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch
new file mode 100644 (file)
index 0000000..666dd4c
--- /dev/null
@@ -0,0 +1,78 @@
+From 9228b26194d1cc00449f12f306f53ef2e234a55b Mon Sep 17 00:00:00 2001
+From: Reiji Watanabe <reijiw@google.com>
+Date: Sun, 12 Mar 2023 20:32:08 -0700
+Subject: KVM: arm64: PMU: Fix GET_ONE_REG for vPMC regs to return the current value
+
+From: Reiji Watanabe <reijiw@google.com>
+
+commit 9228b26194d1cc00449f12f306f53ef2e234a55b upstream.
+
+Have KVM_GET_ONE_REG for vPMU counter (vPMC) registers (PMCCNTR_EL0
+and PMEVCNTR<n>_EL0) return the sum of the register value in the sysreg
+file and the current perf event counter value.
+
+Values of vPMC registers are saved in sysreg files on certain occasions.
+These saved values don't represent the current values of the vPMC
+registers if the perf events for the vPMCs count events after the save.
+The current values of those registers are the sum of the sysreg file
+value and the current perf event counter value.  But, when userspace
+reads those registers (using KVM_GET_ONE_REG), KVM returns the sysreg
+file value to userspace (not the sum value).
+
+Fix this to return the sum value for KVM_GET_ONE_REG.
+
+Fixes: 051ff581ce70 ("arm64: KVM: Add access handler for event counter register")
+Cc: stable@vger.kernel.org
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Reiji Watanabe <reijiw@google.com>
+Link: https://lore.kernel.org/r/20230313033208.1475499-1-reijiw@google.com
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/sys_regs.c |   21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -764,6 +764,22 @@ static bool pmu_counter_idx_valid(struct
+       return true;
+ }
++static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
++                        u64 *val)
++{
++      u64 idx;
++
++      if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
++              /* PMCCNTR_EL0 */
++              idx = ARMV8_PMU_CYCLE_IDX;
++      else
++              /* PMEVCNTRn_EL0 */
++              idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
++
++      *val = kvm_pmu_get_counter_value(vcpu, idx);
++      return 0;
++}
++
+ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+                             struct sys_reg_params *p,
+                             const struct sys_reg_desc *r)
+@@ -980,7 +996,7 @@ static bool access_pmuserenr(struct kvm_
+ /* Macro to expand the PMEVCNTRn_EL0 register */
+ #define PMU_PMEVCNTR_EL0(n)                                           \
+       { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                            \
+-        .reset = reset_pmevcntr,                                      \
++        .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,          \
+         .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
+ /* Macro to expand the PMEVTYPERn_EL0 register */
+@@ -1651,7 +1667,8 @@ static const struct sys_reg_desc sys_reg
+       { PMU_SYS_REG(SYS_PMCEID1_EL0),
+         .access = access_pmceid, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+-        .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
++        .access = access_pmu_evcntr, .reset = reset_unknown,
++        .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
+       { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+         .access = access_pmu_evtyper, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
index d13e9829c3ba51fcff46b5cbbe9db3b741c461f1..b18c91b984f3f37d6c7ea4f3c4cffd9feb765305 100644 (file)
@@ -85,3 +85,5 @@ xtensa-fix-kasan-report-for-show_stack.patch
 rcu-fix-rcu_torture_read-ftrace-event.patch
 drm-etnaviv-fix-reference-leak-when-mmaping-imported-buffer.patch
 drm-amd-display-add-dsc-support-for-synaptics-cascaded-mst-hub.patch
+kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch
+kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch