6.2-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 3 Apr 2023 09:03:26 +0000 (11:03 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 3 Apr 2023 09:03:26 +0000 (11:03 +0200)
added patches:
drm-i915-disable-dc-states-for-all-commits.patch
drm-i915-move-csc-load-back-into-.color_commit_arm-when-psr-is-enabled-on-skl-glk.patch
drm-i915-split-icl_color_commit_noarm-from-skl_color_commit_noarm.patch
kvm-arm64-check-for-kvm_vma_mte_allowed-in-the-critical-section.patch
kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch
kvm-arm64-pmu-don-t-save-pmcr_el0.-c-p-for-the-vcpu.patch
kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch
kvm-arm64-retry-fault-if-vma_lookup-results-become-invalid.patch

queue-6.2/drm-i915-disable-dc-states-for-all-commits.patch [new file with mode: 0644]
queue-6.2/drm-i915-move-csc-load-back-into-.color_commit_arm-when-psr-is-enabled-on-skl-glk.patch [new file with mode: 0644]
queue-6.2/drm-i915-split-icl_color_commit_noarm-from-skl_color_commit_noarm.patch [new file with mode: 0644]
queue-6.2/kvm-arm64-check-for-kvm_vma_mte_allowed-in-the-critical-section.patch [new file with mode: 0644]
queue-6.2/kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch [new file with mode: 0644]
queue-6.2/kvm-arm64-pmu-don-t-save-pmcr_el0.-c-p-for-the-vcpu.patch [new file with mode: 0644]
queue-6.2/kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch [new file with mode: 0644]
queue-6.2/kvm-arm64-retry-fault-if-vma_lookup-results-become-invalid.patch [new file with mode: 0644]
queue-6.2/series

diff --git a/queue-6.2/drm-i915-disable-dc-states-for-all-commits.patch b/queue-6.2/drm-i915-disable-dc-states-for-all-commits.patch
new file mode 100644 (file)
index 0000000..bb56098
--- /dev/null
@@ -0,0 +1,109 @@
+From a2b6e99d8a623544f3bdccd28ee35b9c1b00daa5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Mon, 20 Mar 2023 20:35:32 +0200
+Subject: drm/i915: Disable DC states for all commits
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit a2b6e99d8a623544f3bdccd28ee35b9c1b00daa5 upstream.
+
+Keeping DC states enabled is incompatible with the _noarm()/_arm()
+split we use for writing pipe/plane registers. When DC5 and PSR
+are enabled, all pipe/plane registers effectively become self-arming
+on account of DC5 exit arming the update, and PSR exit latching it.
+
+What probably saves us most of the time is that (with PIPE_MISC[21]=0)
+all pipe register writes themselves trigger PSR exit, and then
+we don't re-enter PSR until the idle frame count has elapsed.
+So it may be that the PSR exit happens already before we've
+updated the state too much.
+
+Also the PSR1 panel (at least on this KBL) seems to discard the first
+frame we transmit, presumably still scanning out from its internal
+framebuffer at that point. So only the second frame we transmit is
+actually visible. But I suppose that could also be panel specific
+behaviour. I haven't checked out how other PSR panels behave, nor
+did I bother to check what the eDP spec has to say about this.
+
+And since this really is all about DC states, let's switch from
+the MODESET domain to the DC_OFF domain. Functionally they are
+100% identical. We should probably remove the MODESET domain...
+
+And for good measure let's toss in an assert to the place where
+we do the _noarm() register writes to make sure DC states are
+in fact off.
+
+v2: Just use intel_display_power_is_enabled() (Imre)
+
+Cc: <stable@vger.kernel.org> #v5.17+
+Cc: Manasi Navare <navaremanasi@google.com>
+Cc: Drew Davenport <ddavenport@chromium.org>
+Cc: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: Imre Deak <imre.deak@intel.com>
+Fixes: d13dde449580 ("drm/i915: Split pipe+output CSC programming to noarm+arm pair")
+Fixes: f8a005eb8972 ("drm/i915: Optimize icl+ universal plane programming")
+Fixes: 890b6ec4a522 ("drm/i915: Split skl+ plane update into noarm+arm pair")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230320183532.17727-1-ville.syrjala@linux.intel.com
+(cherry picked from commit 41b4c7fe72b6105a4b49395eea9aa40cef94288d)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_display.c |   28 ++++++++++++++++++++++++---
+ 1 file changed, 25 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -7107,6 +7107,8 @@ static void intel_update_crtc(struct int
+       intel_fbc_update(state, crtc);
++      drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
++
+       if (!modeset &&
+           intel_crtc_needs_color_update(new_crtc_state))
+               intel_color_commit_noarm(new_crtc_state);
+@@ -7480,8 +7482,28 @@ static void intel_atomic_commit_tail(str
+       drm_atomic_helper_wait_for_dependencies(&state->base);
+       drm_dp_mst_atomic_wait_for_dependencies(&state->base);
+-      if (state->modeset)
+-              wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
++      /*
++       * During full modesets we write a lot of registers, wait
++       * for PLLs, etc. Doing that while DC states are enabled
++       * is not a good idea.
++       *
++       * During fastsets and other updates we also need to
++       * disable DC states due to the following scenario:
++       * 1. DC5 exit and PSR exit happen
++       * 2. Some or all _noarm() registers are written
++       * 3. Due to some long delay PSR is re-entered
++       * 4. DC5 entry -> DMC saves the already written new
++       *    _noarm() registers and the old not yet written
++       *    _arm() registers
++       * 5. DC5 exit -> DMC restores a mixture of old and
++       *    new register values and arms the update
++       * 6. PSR exit -> hardware latches a mixture of old and
++       *    new register values -> corrupted frame, or worse
++       * 7. New _arm() registers are finally written
++       * 8. Hardware finally latches a complete set of new
++       *    register values, and subsequent frames will be OK again
++       */
++      wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
+       intel_atomic_prepare_plane_clear_colors(state);
+@@ -7625,8 +7647,8 @@ static void intel_atomic_commit_tail(str
+                * the culprit.
+                */
+               intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
+-              intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
+       }
++      intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
+       intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+       /*
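
A note on the shape of the fix above: every commit now holds a DC_OFF power
domain reference for its whole duration, instead of only full modesets
grabbing MODESET. A minimal sketch of the resulting flow, in which
write_noarm_registers()/write_arm_registers() are purely illustrative
stand-ins for the driver's _noarm()/_arm() phases:

    /* hypothetical stand-ins for the _noarm()/_arm() write phases */
    static void write_noarm_registers(struct intel_atomic_state *state);
    static void write_arm_registers(struct intel_atomic_state *state);

    /* Sketch only: bracket both write phases with a DC_OFF wakeref so
     * DMC can never save/restore a half-written register set. */
    static void commit_tail_sketch(struct drm_i915_private *i915,
                                   struct intel_atomic_state *state)
    {
            intel_wakeref_t wakeref;

            wakeref = intel_display_power_get(i915, POWER_DOMAIN_DC_OFF);

            write_noarm_registers(state);   /* step 2 of the race above */
            write_arm_registers(state);     /* step 7: arms the update */

            intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
    }

With the wakeref held across both phases, steps 3-6 of the scenario in the
comment (PSR re-entry, DC5 entry, and the mixed save/restore) cannot occur
between the _noarm() and _arm() writes.
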
diff --git a/queue-6.2/drm-i915-move-csc-load-back-into-.color_commit_arm-when-psr-is-enabled-on-skl-glk.patch b/queue-6.2/drm-i915-move-csc-load-back-into-.color_commit_arm-when-psr-is-enabled-on-skl-glk.patch
new file mode 100644 (file)
index 0000000..2fd7cb3
--- /dev/null
@@ -0,0 +1,100 @@
+From a8e03e00b62073b494886dbff32f8b5338066c8b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Mon, 20 Mar 2023 11:54:34 +0200
+Subject: drm/i915: Move CSC load back into .color_commit_arm() when PSR is enabled on skl/glk
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit a8e03e00b62073b494886dbff32f8b5338066c8b upstream.
+
+SKL/GLK CSC unit suffers from a nasty issue where a CSC
+coeff/offset register read or write between DC5 exit and
+PSR exit will undo the CSC arming performed by DMC, and
+then during PSR exit the hardware will latch zeroes into
+the active CSC registers. This causes any plane going
+through the CSC to output all black.
+
+We can sidestep the issue by making sure the PSR exit has
+already actually happened before we touch the CSC coeff/offset
+registers. Easiest way to guarantee that is to just move the
+CSC programming back into the .color_commit_arm() as we force
+a PSR exit (and crucially wait for it to actually happen)
+prior to touching the arming registers.
+
+When PSR (and thus also DC states) are disabled we don't
+have anything to worry about, so we can keep using the
+more optimal _noarm() hook for writing the CSC registers.
+
+Cc: <stable@vger.kernel.org> #v5.19+
+Cc: Manasi Navare <navaremanasi@google.com>
+Cc: Drew Davenport <ddavenport@chromium.org>
+Cc: Imre Deak <imre.deak@intel.com>
+Cc: Jouni Högander <jouni.hogander@intel.com>
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/8283
+Fixes: d13dde449580 ("drm/i915: Split pipe+output CSC programming to noarm+arm pair")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230320095438.17328-3-ville.syrjala@linux.intel.com
+Reviewed-by: Imre Deak <imre.deak@intel.com>
+(cherry picked from commit 80a892a4c2428b65366721599fc5fe50eaed35fd)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_color.c |   23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_color.c
++++ b/drivers/gpu/drm/i915/display/intel_color.c
+@@ -514,6 +514,22 @@ static void icl_color_commit_noarm(const
+       icl_load_csc_matrix(crtc_state);
+ }
++static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
++{
++      /*
++       * Possibly related to display WA #1184, SKL CSC loses the latched
++       * CSC coeff/offset register values if the CSC registers are disarmed
++       * between DC5 exit and PSR exit. This will cause the plane(s) to
++       * output all black (until CSC_MODE is rearmed and properly latched).
++       * Once PSR exit (and proper register latching) has occurred the
++       * danger is over. Thus when PSR is enabled the CSC coeff/offset
++       * register programming will be performed from skl_color_commit_arm()
++       * which is called after PSR exit.
++       */
++      if (!crtc_state->has_psr)
++              ilk_load_csc_matrix(crtc_state);
++}
++
+ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+ {
+       ilk_load_csc_matrix(crtc_state);
+@@ -556,6 +572,9 @@ static void skl_color_commit_arm(const s
+       enum pipe pipe = crtc->pipe;
+       u32 val = 0;
++      if (crtc_state->has_psr)
++              ilk_load_csc_matrix(crtc_state);
++
+       /*
+        * We don't (yet) allow userspace to control the pipe background color,
+        * so force it to black, but apply pipe gamma and CSC appropriately
+@@ -2313,7 +2332,7 @@ static const struct intel_color_funcs ic
+ static const struct intel_color_funcs glk_color_funcs = {
+       .color_check = glk_color_check,
+-      .color_commit_noarm = ilk_color_commit_noarm,
++      .color_commit_noarm = skl_color_commit_noarm,
+       .color_commit_arm = skl_color_commit_arm,
+       .load_luts = glk_load_luts,
+       .read_luts = glk_read_luts,
+@@ -2321,7 +2340,7 @@ static const struct intel_color_funcs gl
+ static const struct intel_color_funcs skl_color_funcs = {
+       .color_check = ivb_color_check,
+-      .color_commit_noarm = ilk_color_commit_noarm,
++      .color_commit_noarm = skl_color_commit_noarm,
+       .color_commit_arm = skl_color_commit_arm,
+       .load_luts = bdw_load_luts,
+       .read_luts = NULL,
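
Condensed, the invariant these hunks establish: the CSC matrix is loaded in
the _noarm() phase exactly when PSR is off, and in the _arm() phase (after
the forced PSR exit) exactly when PSR is on. A hypothetical condensation of
the two hooks, only to make that invariant explicit:

    /* Sketch: load the CSC during the arm phase iff PSR is enabled, so
     * the write never lands between DC5 exit and PSR exit. */
    static void skl_load_csc_sketch(const struct intel_crtc_state *crtc_state,
                                    bool arm_phase)
    {
            if (crtc_state->has_psr == arm_phase)
                    ilk_load_csc_matrix(crtc_state);
    }
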
diff --git a/queue-6.2/drm-i915-split-icl_color_commit_noarm-from-skl_color_commit_noarm.patch b/queue-6.2/drm-i915-split-icl_color_commit_noarm-from-skl_color_commit_noarm.patch
new file mode 100644 (file)
index 0000000..79aafd7
--- /dev/null
@@ -0,0 +1,70 @@
+From 76b767d4d1cd052e455cf18e06929e8b2b70101d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Mon, 20 Mar 2023 11:54:33 +0200
+Subject: drm/i915: Split icl_color_commit_noarm() from skl_color_commit_noarm()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 76b767d4d1cd052e455cf18e06929e8b2b70101d upstream.
+
+We're going to want different behavior for skl/glk vs. icl
+in .color_commit_noarm(), so split the hook into two. Arguably
+we already had slightly different behaviour since
+csc_enable/gamma_enable are never set on icl+, so the old
+code was perhaps a bit confusing as well.
+
+Cc: <stable@vger.kernel.org> #v5.19+
+Cc: Manasi Navare <navaremanasi@google.com>
+Cc: Drew Davenport <ddavenport@chromium.org>
+Cc: Imre Deak <imre.deak@intel.com>
+Cc: Jouni Högander <jouni.hogander@intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230320095438.17328-2-ville.syrjala@linux.intel.com
+Reviewed-by: Imre Deak <imre.deak@intel.com>
+(cherry picked from commit f161eb01f50ab31f2084975b43bce54b7b671e17)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_color.c |   21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_color.c
++++ b/drivers/gpu/drm/i915/display/intel_color.c
+@@ -574,6 +574,25 @@ static void skl_color_commit_arm(const s
+                         crtc_state->csc_mode);
+ }
++static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
++{
++      struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
++      struct drm_i915_private *i915 = to_i915(crtc->base.dev);
++      enum pipe pipe = crtc->pipe;
++
++      /*
++       * We don't (yet) allow userspace to control the pipe background color,
++       * so force it to black.
++       */
++      intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
++
++      intel_de_write(i915, GAMMA_MODE(crtc->pipe),
++                     crtc_state->gamma_mode);
++
++      intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
++                        crtc_state->csc_mode);
++}
++
+ static struct drm_property_blob *
+ create_linear_lut(struct drm_i915_private *i915, int lut_size)
+ {
+@@ -2287,7 +2306,7 @@ static const struct intel_color_funcs i9
+ static const struct intel_color_funcs icl_color_funcs = {
+       .color_check = icl_color_check,
+       .color_commit_noarm = icl_color_commit_noarm,
+-      .color_commit_arm = skl_color_commit_arm,
++      .color_commit_arm = icl_color_commit_arm,
+       .load_luts = icl_load_luts,
+       .read_luts = icl_read_luts,
+ };
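
The behavioural delta the split makes room for is small: on skl the arm hook
derives the SKL_BOTTOM_COLOR enable bits from the crtc state, while on icl+
(where csc_enable/gamma_enable are never set) the register is simply written
as black. A rough contrast of the two paths, with the DISPLAY_VER() check
standing in for the per-platform vfunc selection:

    /* Condensed contrast of the skl vs. icl arm hooks; illustrative only. */
    static void bottom_color_sketch(struct drm_i915_private *i915,
                                    const struct intel_crtc_state *crtc_state,
                                    enum pipe pipe)
    {
            u32 val = 0;

            if (DISPLAY_VER(i915) < 11) {
                    /* skl/glk: enable bits follow the crtc state */
                    if (crtc_state->gamma_enable)
                            val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
                    if (crtc_state->csc_enable)
                            val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
            }
            /* icl+: the bits don't exist, so this stays plain black */
            intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), val);
    }
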
diff --git a/queue-6.2/kvm-arm64-check-for-kvm_vma_mte_allowed-in-the-critical-section.patch b/queue-6.2/kvm-arm64-check-for-kvm_vma_mte_allowed-in-the-critical-section.patch
new file mode 100644 (file)
index 0000000..f1cbbbb
--- /dev/null
@@ -0,0 +1,57 @@
+From 8c2e8ac8ad4be68409e806ce1cc78fc7a04539f3 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 16 Mar 2023 17:45:46 +0000
+Subject: KVM: arm64: Check for kvm_vma_mte_allowed in the critical section
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 8c2e8ac8ad4be68409e806ce1cc78fc7a04539f3 upstream.
+
+On page fault, we find out about the VMA that backs the page fault
+early on, and quickly release the mmap_read_lock. However, using
+the VMA pointer after the critical section is pretty dangerous,
+as a teardown may happen in the meantime and the VMA be long gone.
+
+Move the sampling of the MTE permission early, and NULL-ify the
+VMA pointer after that, just to be on the safe side.
+
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230316174546.3777507-3-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/mmu.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1218,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcp
+ {
+       int ret = 0;
+       bool write_fault, writable, force_pte = false;
+-      bool exec_fault;
++      bool exec_fault, mte_allowed;
+       bool device = false;
+       unsigned long mmu_seq;
+       struct kvm *kvm = vcpu->kvm;
+@@ -1309,6 +1309,10 @@ static int user_mem_abort(struct kvm_vcp
+               fault_ipa &= ~(vma_pagesize - 1);
+       gfn = fault_ipa >> PAGE_SHIFT;
++      mte_allowed = kvm_vma_mte_allowed(vma);
++
++      /* Don't use the VMA after the unlock -- it may have vanished */
++      vma = NULL;
+       /*
+        * Read mmu_invalidate_seq so that KVM can detect if the results of
+@@ -1379,7 +1383,7 @@ static int user_mem_abort(struct kvm_vcp
+       if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
+               /* Check the VMM hasn't introduced a new disallowed VMA */
+-              if (kvm_vma_mte_allowed(vma)) {
++              if (mte_allowed) {
+                       sanitise_mte_tags(kvm, pfn, vma_pagesize);
+               } else {
+                       ret = -EFAULT;
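
The idiom the patch enforces is worth spelling out: snapshot everything you
need from the VMA while the mmap read lock is held, then poison the pointer
so it cannot be used once the lock is dropped. A standalone sketch of that
idiom (the helper name is hypothetical; kvm_vma_mte_allowed() is the check
the patch moves):

    /* Sketch: sample VMA-derived state under the lock, then forget it. */
    static int sample_vma_state_sketch(struct mm_struct *mm, unsigned long hva,
                                       bool *mte_allowed)
    {
            struct vm_area_struct *vma;

            mmap_read_lock(mm);
            vma = vma_lookup(mm, hva);
            if (!vma) {
                    mmap_read_unlock(mm);
                    return -EFAULT;
            }
            *mte_allowed = kvm_vma_mte_allowed(vma); /* inside the lock */
            vma = NULL; /* a teardown may free the VMA after the unlock */
            mmap_read_unlock(mm);
            return 0;
    }
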
diff --git a/queue-6.2/kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch b/queue-6.2/kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch
new file mode 100644 (file)
index 0000000..312b832
--- /dev/null
@@ -0,0 +1,119 @@
+From e86fc1a3a3e9b4850fe74d738e3cfcf4297d8bba Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 16 Mar 2023 17:45:45 +0000
+Subject: KVM: arm64: Disable interrupts while walking userspace PTs
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit e86fc1a3a3e9b4850fe74d738e3cfcf4297d8bba upstream.
+
+We walk the userspace PTs to discover what mapping size was
+used there. However, this can race against the userspace tables
+being freed, and we end up in the weeds.
+
+Thankfully, the mm code is being generous and will IPI us when
+doing so. So let's implement our part of the bargain and disable
+interrupts around the walk. This ensures that nothing terrible
+happens during that time.
+
+We still need to handle the removal of the page tables before
+the walk. For that, allow get_user_mapping_size() to return an
+error, and make sure this error can be propagated all the way
+to the exit handler.
+
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230316174546.3777507-2-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/mmu.c |   45 ++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 38 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -665,14 +665,33 @@ static int get_user_mapping_size(struct
+                                  CONFIG_PGTABLE_LEVELS),
+               .mm_ops         = &kvm_user_mm_ops,
+       };
++      unsigned long flags;
+       kvm_pte_t pte = 0;      /* Keep GCC quiet... */
+       u32 level = ~0;
+       int ret;
++      /*
++       * Disable IRQs so that we hazard against a concurrent
++       * teardown of the userspace page tables (which relies on
++       * IPI-ing threads).
++       */
++      local_irq_save(flags);
+       ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
+-      VM_BUG_ON(ret);
+-      VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
+-      VM_BUG_ON(!(pte & PTE_VALID));
++      local_irq_restore(flags);
++
++      if (ret)
++              return ret;
++
++      /*
++       * Not seeing an error, but not updating level? Something went
++       * deeply wrong...
++       */
++      if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
++              return -EFAULT;
++
++      /* Oops, the userspace PTs are gone... Replay the fault */
++      if (!kvm_pte_valid(pte))
++              return -EAGAIN;
+       return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
+ }
+@@ -1079,7 +1098,7 @@ static bool fault_supports_stage2_huge_m
+  *
+  * Returns the size of the mapping.
+  */
+-static unsigned long
++static long
+ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                           unsigned long hva, kvm_pfn_t *pfnp,
+                           phys_addr_t *ipap)
+@@ -1091,8 +1110,15 @@ transparent_hugepage_adjust(struct kvm *
+        * sure that the HVA and IPA are sufficiently aligned and that the
+        * block map is contained within the memslot.
+        */
+-      if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+-          get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
++      if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
++              int sz = get_user_mapping_size(kvm, hva);
++
++              if (sz < 0)
++                      return sz;
++
++              if (sz < PMD_SIZE)
++                      return PAGE_SIZE;
++
+               /*
+                * The address we faulted on is backed by a transparent huge
+                * page.  However, because we map the compound huge page and
+@@ -1203,7 +1229,7 @@ static int user_mem_abort(struct kvm_vcp
+       kvm_pfn_t pfn;
+       bool logging_active = memslot_is_logging(memslot);
+       unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
+-      unsigned long vma_pagesize, fault_granule;
++      long vma_pagesize, fault_granule;
+       enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
+       struct kvm_pgtable *pgt;
+@@ -1344,6 +1370,11 @@ static int user_mem_abort(struct kvm_vcp
+                       vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
+                                                                  hva, &pfn,
+                                                                  &fault_ipa);
++
++              if (vma_pagesize < 0) {
++                      ret = vma_pagesize;
++                      goto out_unlock;
++              }
+       }
+       if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
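
Taken together, the walk now has the shape sketched below: interrupts stay
off for the duration of the lookup (so a concurrent teardown, which IPIs
all threads before freeing the tables, must wait for us), and both failure
modes become error returns instead of BUG_ONs. This mirrors the patched
get_user_mapping_size() with the pgtable setup elided:

    /* Sketch of the IRQ-guarded walk; error codes match the patch. */
    static int walk_user_pt_sketch(struct kvm_pgtable *pgt, u64 addr)
    {
            unsigned long flags;
            kvm_pte_t pte = 0;
            u32 level = ~0;
            int ret;

            local_irq_save(flags);  /* holds off the IPI-based teardown */
            ret = kvm_pgtable_get_leaf(pgt, addr, &pte, &level);
            local_irq_restore(flags);

            if (ret)
                    return ret;
            if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
                    return -EFAULT; /* no error yet no level: deeply wrong */
            if (!kvm_pte_valid(pte))
                    return -EAGAIN; /* PTs vanished under us: replay the fault */

            return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
    }
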
diff --git a/queue-6.2/kvm-arm64-pmu-don-t-save-pmcr_el0.-c-p-for-the-vcpu.patch b/queue-6.2/kvm-arm64-pmu-don-t-save-pmcr_el0.-c-p-for-the-vcpu.patch
new file mode 100644 (file)
index 0000000..c9d9df0
--- /dev/null
@@ -0,0 +1,43 @@
+From f6da81f650fa47b61b847488f3938d43f90d093d Mon Sep 17 00:00:00 2001
+From: Reiji Watanabe <reijiw@google.com>
+Date: Sun, 12 Mar 2023 20:32:34 -0700
+Subject: KVM: arm64: PMU: Don't save PMCR_EL0.{C,P} for the vCPU
+
+From: Reiji Watanabe <reijiw@google.com>
+
+commit f6da81f650fa47b61b847488f3938d43f90d093d upstream.
+
+Presently, when a guest writes 1 to PMCR_EL0.{C,P}, which is WO/RAZ,
+KVM saves the register value, including these bits.
+When userspace reads the register using KVM_GET_ONE_REG, KVM returns
+the saved register value as it is (the saved value might have these
+bits set).  This could result in userspace setting these bits on the
+destination during migration.  Consequently, KVM may end up resetting
+the vPMU counter registers (PMCCNTR_EL0 and/or PMEVCNTR<n>_EL0) to
+zero on the first KVM_RUN after migration.
+
+Fix this by not saving those bits when a guest writes 1 to those bits.
+
+Fixes: ab9468340d2b ("arm64: KVM: Add access handler for PMCR register")
+Cc: stable@vger.kernel.org
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Reiji Watanabe <reijiw@google.com>
+Link: https://lore.kernel.org/r/20230313033234.1475987-1-reijiw@google.com
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/pmu-emul.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -538,7 +538,8 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu
+       if (!kvm_pmu_is_3p5(vcpu))
+               val &= ~ARMV8_PMU_PMCR_LP;
+-      __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
++      /* The reset bits don't indicate any state, and shouldn't be saved. */
++      __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
+       if (val & ARMV8_PMU_PMCR_E) {
+               kvm_pmu_enable_counter_mask(vcpu,
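
The fix reduces to masking on the save path: PMCR_EL0.C and PMCR_EL0.P are
write-only reset requests, so the value stored in the sysreg file (and later
returned by KVM_GET_ONE_REG) should always have them cleared. As a
standalone sketch:

    /* Illustrative helper: strip the write-only reset bits before the
     * guest's PMCR_EL0 value is saved into the sysreg file. */
    static u64 pmcr_sanitise_sketch(u64 val)
    {
            /* C and P request counter resets; they always read back as 0 */
            return val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
    }
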
diff --git a/queue-6.2/kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch b/queue-6.2/kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch
new file mode 100644 (file)
index 0000000..55b4ef6
--- /dev/null
@@ -0,0 +1,78 @@
+From 9228b26194d1cc00449f12f306f53ef2e234a55b Mon Sep 17 00:00:00 2001
+From: Reiji Watanabe <reijiw@google.com>
+Date: Sun, 12 Mar 2023 20:32:08 -0700
+Subject: KVM: arm64: PMU: Fix GET_ONE_REG for vPMC regs to return the current value
+
+From: Reiji Watanabe <reijiw@google.com>
+
+commit 9228b26194d1cc00449f12f306f53ef2e234a55b upstream.
+
+Have KVM_GET_ONE_REG for vPMU counter (vPMC) registers (PMCCNTR_EL0
+and PMEVCNTR<n>_EL0) return the sum of the register value in the sysreg
+file and the current perf event counter value.
+
+Values of vPMC registers are saved in sysreg files on certain occasions.
+These saved values don't represent the current values of the vPMC
+registers if the perf events for the vPMCs count events after the save.
+The current values of those registers are the sum of the sysreg file
+value and the current perf event counter value.  But, when userspace
+reads those registers (using KVM_GET_ONE_REG), KVM returns the sysreg
+file value to userspace (not the sum value).
+
+Fix this to return the sum value for KVM_GET_ONE_REG.
+
+Fixes: 051ff581ce70 ("arm64: KVM: Add access handler for event counter register")
+Cc: stable@vger.kernel.org
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Reiji Watanabe <reijiw@google.com>
+Link: https://lore.kernel.org/r/20230313033208.1475499-1-reijiw@google.com
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/sys_regs.c |   21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -765,6 +765,22 @@ static bool pmu_counter_idx_valid(struct
+       return true;
+ }
++static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
++                        u64 *val)
++{
++      u64 idx;
++
++      if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
++              /* PMCCNTR_EL0 */
++              idx = ARMV8_PMU_CYCLE_IDX;
++      else
++              /* PMEVCNTRn_EL0 */
++              idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
++
++      *val = kvm_pmu_get_counter_value(vcpu, idx);
++      return 0;
++}
++
+ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+                             struct sys_reg_params *p,
+                             const struct sys_reg_desc *r)
+@@ -981,7 +997,7 @@ static bool access_pmuserenr(struct kvm_
+ /* Macro to expand the PMEVCNTRn_EL0 register */
+ #define PMU_PMEVCNTR_EL0(n)                                           \
+       { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                            \
+-        .reset = reset_pmevcntr,                                      \
++        .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,          \
+         .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
+ /* Macro to expand the PMEVTYPERn_EL0 register */
+@@ -1745,7 +1761,8 @@ static const struct sys_reg_desc sys_reg
+       { PMU_SYS_REG(SYS_PMCEID1_EL0),
+         .access = access_pmceid, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+-        .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
++        .access = access_pmu_evcntr, .reset = reset_unknown,
++        .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
+       { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+         .access = access_pmu_evtyper, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
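
For reference, the decode in get_pmu_evcntr() maps the sysreg encoding back
to a counter index (CRn=9, CRm=13, Op2=0 is PMCCNTR_EL0; PMEVCNTR<n>_EL0
packs n into CRm[1:0]:Op2[2:0]), and kvm_pmu_get_counter_value() returns the
sysreg-file snapshot plus the live perf count. Schematically, with
perf_delta_sketch() as a stand-in (not a real KVM API) for the perf-event
query:

    /* Stand-in for reading the backing perf event's accumulated count. */
    static u64 perf_delta_sketch(struct kvm_vcpu *vcpu, u64 idx)
    {
            return 0; /* a real implementation reads the perf event */
    }

    /* Sketch of what userspace now sees via KVM_GET_ONE_REG:
     * saved snapshot + events counted since that snapshot. */
    static u64 vpmc_read_sketch(struct kvm_vcpu *vcpu, u64 idx)
    {
            u64 reg = (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0
                                                   : PMEVCNTR0_EL0 + idx;

            return __vcpu_sys_reg(vcpu, reg) + perf_delta_sketch(vcpu, idx);
    }
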
diff --git a/queue-6.2/kvm-arm64-retry-fault-if-vma_lookup-results-become-invalid.patch b/queue-6.2/kvm-arm64-retry-fault-if-vma_lookup-results-become-invalid.patch
new file mode 100644 (file)
index 0000000..e60bddf
--- /dev/null
@@ -0,0 +1,106 @@
+From 13ec9308a85702af7c31f3638a2720863848a7f2 Mon Sep 17 00:00:00 2001
+From: David Matlack <dmatlack@google.com>
+Date: Mon, 13 Mar 2023 16:54:54 -0700
+Subject: KVM: arm64: Retry fault if vma_lookup() results become invalid
+
+From: David Matlack <dmatlack@google.com>
+
+commit 13ec9308a85702af7c31f3638a2720863848a7f2 upstream.
+
+Read mmu_invalidate_seq before dropping the mmap_lock so that KVM can
+detect if the results of vma_lookup() (e.g. vma_shift) become stale
+before it acquires kvm->mmu_lock. This fixes a theoretical bug where a
+VMA could be changed by userspace after vma_lookup() and before KVM
+reads the mmu_invalidate_seq, causing KVM to install page table entries
+based on a (possibly) no-longer-valid vma_shift.
+
+Re-order the MMU cache top-up to earlier in user_mem_abort() so that it
+is not done after KVM has read mmu_invalidate_seq (i.e. so as to avoid
+inducing spurious fault retries).
+
+This bug has existed since KVM/ARM's inception. It's unlikely that any
+sane userspace currently modifies VMAs in such a way as to trigger this
+race. And even with directed testing I was unable to reproduce it. But a
+sufficiently motivated host userspace might be able to exploit this
+race.
+
+Fixes: 94f8e6418d39 ("KVM: ARM: Handle guest faults in KVM")
+Cc: stable@vger.kernel.org
+Reported-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: David Matlack <dmatlack@google.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20230313235454.2964067-1-dmatlack@google.com
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/mmu.c |   48 +++++++++++++++++++++---------------------------
+ 1 file changed, 21 insertions(+), 27 deletions(-)
+
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1218,6 +1218,20 @@ static int user_mem_abort(struct kvm_vcp
+       }
+       /*
++       * Permission faults just need to update the existing leaf entry,
++       * and so normally don't require allocations from the memcache. The
++       * only exception to this is when dirty logging is enabled at runtime
++       * and a write fault needs to collapse a block entry into a table.
++       */
++      if (fault_status != ESR_ELx_FSC_PERM ||
++          (logging_active && write_fault)) {
++              ret = kvm_mmu_topup_memory_cache(memcache,
++                                               kvm_mmu_cache_min_pages(kvm));
++              if (ret)
++                      return ret;
++      }
++
++      /*
+        * Let's check if we will get back a huge page backed by hugetlbfs, or
+        * get block mapping for device MMIO region.
+        */
+@@ -1269,37 +1283,17 @@ static int user_mem_abort(struct kvm_vcp
+               fault_ipa &= ~(vma_pagesize - 1);
+       gfn = fault_ipa >> PAGE_SHIFT;
+-      mmap_read_unlock(current->mm);
+-
+-      /*
+-       * Permission faults just need to update the existing leaf entry,
+-       * and so normally don't require allocations from the memcache. The
+-       * only exception to this is when dirty logging is enabled at runtime
+-       * and a write fault needs to collapse a block entry into a table.
+-       */
+-      if (fault_status != ESR_ELx_FSC_PERM ||
+-          (logging_active && write_fault)) {
+-              ret = kvm_mmu_topup_memory_cache(memcache,
+-                                               kvm_mmu_cache_min_pages(kvm));
+-              if (ret)
+-                      return ret;
+-      }
+-      mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+       /*
+-       * Ensure the read of mmu_invalidate_seq happens before we call
+-       * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
+-       * the page we just got a reference to gets unmapped before we have a
+-       * chance to grab the mmu_lock, which ensure that if the page gets
+-       * unmapped afterwards, the call to kvm_unmap_gfn will take it away
+-       * from us again properly. This smp_rmb() interacts with the smp_wmb()
+-       * in kvm_mmu_notifier_invalidate_<page|range_end>.
++       * Read mmu_invalidate_seq so that KVM can detect if the results of
++       * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to
++       * acquiring kvm->mmu_lock.
+        *
+-       * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
+-       * used to avoid unnecessary overhead introduced to locate the memory
+-       * slot because it's always fixed even @gfn is adjusted for huge pages.
++       * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
++       * with the smp_wmb() in kvm_mmu_invalidate_end().
+        */
+-      smp_rmb();
++      mmu_seq = vcpu->kvm->mmu_invalidate_seq;
++      mmap_read_unlock(current->mm);
+       pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
+                                  write_fault, &writable, NULL);
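
The ordering the patch establishes is easiest to see end to end: sample
mmu_invalidate_seq before dropping mmap_lock, rely on the unlock for the
read barrier, and recheck the sequence under kvm->mmu_lock before touching
the page tables. A condensed sketch of that fault path (lookup and pfn
resolution elided):

    /* Sketch of the reordered fault path; helpers elided for brevity. */
    static int fault_order_sketch(struct kvm_vcpu *vcpu)
    {
            struct kvm *kvm = vcpu->kvm;
            unsigned long mmu_seq;
            int ret = -EAGAIN;

            mmap_read_lock(current->mm);
            /* ... vma_lookup(), vma_shift sampling ... */
            mmu_seq = kvm->mmu_invalidate_seq;  /* read before the unlock */
            mmap_read_unlock(current->mm);      /* implies the smp_rmb() */

            /* ... __gfn_to_pfn_memslot() and friends ... */

            read_lock(&kvm->mmu_lock);
            if (mmu_invalidate_retry(kvm, mmu_seq))
                    goto out_unlock;            /* lookup went stale: replay */
            /* safe to install page table entries here */
            ret = 0;
    out_unlock:
            read_unlock(&kvm->mmu_lock);
            return ret;
    }
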
diff --git a/queue-6.2/series b/queue-6.2/series
index c250be8ab923a720dc5418b78dc2eecee84fadf2..13e6ddb410b3df2ce9f69cb46a70eac83e24fd81 100644 (file)
--- a/queue-6.2/series
+++ b/queue-6.2/series
@@ -170,3 +170,11 @@ drm-amd-display-add-dsc-support-for-synaptics-cascaded-mst-hub.patch
 drm-amd-display-take-fec-overhead-into-timeslot-calculation.patch
 drm-i915-gem-flush-lmem-contents-after-construction.patch
 drm-i915-dpt-treat-the-dpt-bo-as-a-framebuffer.patch
+drm-i915-disable-dc-states-for-all-commits.patch
+drm-i915-split-icl_color_commit_noarm-from-skl_color_commit_noarm.patch
+drm-i915-move-csc-load-back-into-.color_commit_arm-when-psr-is-enabled-on-skl-glk.patch
+kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch
+kvm-arm64-pmu-don-t-save-pmcr_el0.-c-p-for-the-vcpu.patch
+kvm-arm64-retry-fault-if-vma_lookup-results-become-invalid.patch
+kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch
+kvm-arm64-check-for-kvm_vma_mte_allowed-in-the-critical-section.patch