git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.2-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Apr 2023 14:02:12 +0000 (16:02 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Apr 2023 14:02:12 +0000 (16:02 +0200)
added patches:
drm-amd-display-clear-mst-topology-if-it-fails-to-resume.patch
drm-amdgpu-for-s0ix-skip-sdma-5.x-suspend-resume.patch
drm-amdgpu-skip-psp-suspend-for-imu-enabled-asics-mode2-reset.patch
drm-i915-fix-context-runtime-accounting.patch
drm-i915-fix-race-condition-uaf-in-i915_perf_add_config_ioctl.patch
drm-nouveau-disp-support-more-modes-by-checking-with-lower-bpc.patch
drm-panfrost-fix-the-panfrost_mmu_map_fault_addr-error-path.patch
maple_tree-fix-a-potential-concurrency-bug-in-rcu-mode.patch
maple_tree-fix-get-wrong-data_end-in-mtree_lookup_walk.patch
mm-hugetlb-fix-uffd-wr-protection-for-cow-optimization-path.patch
mm-swap-fix-swap_info_struct-race-between-swapoff-and-get_swap_pages.patch
mm-vmalloc-avoid-warn_alloc-noise-caused-by-fatal-signal.patch
ring-buffer-fix-race-while-reader-and-writer-are-on-the-same-page.patch
ublk-read-any-sqe-values-upfront.patch
wifi-mt76-ignore-key-disable-commands.patch
wifi-mt76-mt7921-fix-fw-used-for-offload-check-for-mt7922.patch
zsmalloc-document-freeable-stats.patch

18 files changed:
queue-6.2/drm-amd-display-clear-mst-topology-if-it-fails-to-resume.patch [new file with mode: 0644]
queue-6.2/drm-amdgpu-for-s0ix-skip-sdma-5.x-suspend-resume.patch [new file with mode: 0644]
queue-6.2/drm-amdgpu-skip-psp-suspend-for-imu-enabled-asics-mode2-reset.patch [new file with mode: 0644]
queue-6.2/drm-i915-fix-context-runtime-accounting.patch [new file with mode: 0644]
queue-6.2/drm-i915-fix-race-condition-uaf-in-i915_perf_add_config_ioctl.patch [new file with mode: 0644]
queue-6.2/drm-nouveau-disp-support-more-modes-by-checking-with-lower-bpc.patch [new file with mode: 0644]
queue-6.2/drm-panfrost-fix-the-panfrost_mmu_map_fault_addr-error-path.patch [new file with mode: 0644]
queue-6.2/maple_tree-fix-a-potential-concurrency-bug-in-rcu-mode.patch [new file with mode: 0644]
queue-6.2/maple_tree-fix-get-wrong-data_end-in-mtree_lookup_walk.patch [new file with mode: 0644]
queue-6.2/mm-hugetlb-fix-uffd-wr-protection-for-cow-optimization-path.patch [new file with mode: 0644]
queue-6.2/mm-swap-fix-swap_info_struct-race-between-swapoff-and-get_swap_pages.patch [new file with mode: 0644]
queue-6.2/mm-vmalloc-avoid-warn_alloc-noise-caused-by-fatal-signal.patch [new file with mode: 0644]
queue-6.2/ring-buffer-fix-race-while-reader-and-writer-are-on-the-same-page.patch [new file with mode: 0644]
queue-6.2/series
queue-6.2/ublk-read-any-sqe-values-upfront.patch [new file with mode: 0644]
queue-6.2/wifi-mt76-ignore-key-disable-commands.patch [new file with mode: 0644]
queue-6.2/wifi-mt76-mt7921-fix-fw-used-for-offload-check-for-mt7922.patch [new file with mode: 0644]
queue-6.2/zsmalloc-document-freeable-stats.patch [new file with mode: 0644]

diff --git a/queue-6.2/drm-amd-display-clear-mst-topology-if-it-fails-to-resume.patch b/queue-6.2/drm-amd-display-clear-mst-topology-if-it-fails-to-resume.patch
new file mode 100644 (file)
index 0000000..af5c60f
--- /dev/null
@@ -0,0 +1,39 @@
+From 3f6752b4de41896c7f1609b1585db2080e8150d8 Mon Sep 17 00:00:00 2001
+From: Roman Li <roman.li@amd.com>
+Date: Thu, 1 Dec 2022 09:49:23 -0500
+Subject: drm/amd/display: Clear MST topology if it fails to resume
+
+From: Roman Li <roman.li@amd.com>
+
+commit 3f6752b4de41896c7f1609b1585db2080e8150d8 upstream.
+
+[Why]
+In case of failure to resume MST topology after suspend, an empty
+MST tree prevents further MST hub detection on the same connector.
+That causes an issue with MST hub hotplug after the hub has been
+unplugged during suspend.
+
+[How]
+Stop topology manager on the connector after detecting DM_MST failure.
+
+Reviewed-by: Wayne Lin <Wayne.Lin@amd.com>
+Acked-by: Jasdeep Dhillon <jdhillon@amd.com>
+Signed-off-by: Roman Li <roman.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: "Limonciello, Mario" <Mario.Limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2183,6 +2183,8 @@ static int detect_mst_link_for_all_conne
+                               DRM_ERROR("DM_MST: Failed to start MST\n");
+                               aconnector->dc_link->type =
+                                       dc_connection_single;
++                              ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
++                                                                   aconnector->dc_link);
+                               break;
+                       }
+               }
diff --git a/queue-6.2/drm-amdgpu-for-s0ix-skip-sdma-5.x-suspend-resume.patch b/queue-6.2/drm-amdgpu-for-s0ix-skip-sdma-5.x-suspend-resume.patch
new file mode 100644 (file)
index 0000000..4bc7393
--- /dev/null
@@ -0,0 +1,39 @@
+From 2a7798ea7390fd78f191c9e9bf68f5581d3b4a02 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 2 Dec 2022 10:13:40 -0500
+Subject: drm/amdgpu: for S0ix, skip SDMA 5.x+ suspend/resume
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 2a7798ea7390fd78f191c9e9bf68f5581d3b4a02 upstream.
+
+SDMA 5.x is part of the GFX block so it's controlled via
+GFXOFF.  Skip suspend as it should be handled the same
+as GFX.
+
+v2: drop SDMA 4.x.  That requires special handling.
+
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Acked-by: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: "Limonciello, Mario" <Mario.Limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3045,6 +3045,12 @@ static int amdgpu_device_ip_suspend_phas
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
+                       continue;
++              /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
++              if (adev->in_s0ix &&
++                  (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
++                  (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
++                      continue;
++
+               /* XXX handle errors */
+               r = adev->ip_blocks[i].version->funcs->suspend(adev);
+               /* XXX handle errors */
diff --git a/queue-6.2/drm-amdgpu-skip-psp-suspend-for-imu-enabled-asics-mode2-reset.patch b/queue-6.2/drm-amdgpu-skip-psp-suspend-for-imu-enabled-asics-mode2-reset.patch
new file mode 100644 (file)
index 0000000..009ab48
--- /dev/null
@@ -0,0 +1,42 @@
+From e11c775030c5585370fda43035204bb5fa23b139 Mon Sep 17 00:00:00 2001
+From: Tim Huang <tim.huang@amd.com>
+Date: Fri, 20 Jan 2023 22:27:32 +0800
+Subject: drm/amdgpu: skip psp suspend for IMU enabled ASICs mode2 reset
+
+From: Tim Huang <tim.huang@amd.com>
+
+commit e11c775030c5585370fda43035204bb5fa23b139 upstream.
+
+The psp suspend & resume should be skipped to avoid destroying
+the TMR and reloading FWs again for IMU-enabled APU ASICs.
+
+Signed-off-by: Tim Huang <tim.huang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3051,6 +3051,18 @@ static int amdgpu_device_ip_suspend_phas
+                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+                       continue;
++              /* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot.
++               * These are in TMR, hence are expected to be reused by PSP-TOS to reload
++               * from this location and RLC Autoload automatically also gets loaded
++               * from here based on PMFW -> PSP message during re-init sequence.
++               * Therefore, the psp suspend & resume should be skipped to avoid destroy
++               * the TMR and reload FWs again for IMU enabled APU ASICs.
++               */
++              if (amdgpu_in_reset(adev) &&
++                  (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
++                  adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
++                      continue;
++
+               /* XXX handle errors */
+               r = adev->ip_blocks[i].version->funcs->suspend(adev);
+               /* XXX handle errors */
diff --git a/queue-6.2/drm-i915-fix-context-runtime-accounting.patch b/queue-6.2/drm-i915-fix-context-runtime-accounting.patch
new file mode 100644 (file)
index 0000000..c2ebbaf
--- /dev/null
@@ -0,0 +1,61 @@
+From dc3421560a67361442f33ec962fc6dd48895a0df Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Date: Mon, 20 Mar 2023 15:14:23 +0000
+Subject: drm/i915: Fix context runtime accounting
+
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+
+commit dc3421560a67361442f33ec962fc6dd48895a0df upstream.
+
+When considering whether to mark one context as stopped and another as
+started we need to look at whether the previous and new _contexts_ are
+different and not just requests. Otherwise the software tracked context
+start time was incorrectly updated to the most recent lite-restore time-
+stamp, which was in some cases resulting in active time going backward,
+until the context switch (typically the heartbeat pulse) would synchronise
+with the hardware tracked context runtime. Easiest use case to observe
+this behaviour was with full-screen clients with close to 100% engine
+load.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Fixes: bb6287cb1886 ("drm/i915: Track context current active time")
+Cc: <stable@vger.kernel.org> # v5.19+
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230320151423.1708436-1-tvrtko.ursulin@linux.intel.com
+[tursulin: Fix spelling in commit msg.]
+(cherry picked from commit b3e70051879c665acdd3a1ab50d0ed58d6a8001f)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/intel_execlists_submission.c |   12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -2018,6 +2018,8 @@ process_csb(struct intel_engine_cs *engi
+        * inspecting the queue to see if we need to resumbit.
+        */
+       if (*prev != *execlists->active) { /* elide lite-restores */
++              struct intel_context *prev_ce = NULL, *active_ce = NULL;
++
+               /*
+                * Note the inherent discrepancy between the HW runtime,
+                * recorded as part of the context switch, and the CPU
+@@ -2029,9 +2031,15 @@ process_csb(struct intel_engine_cs *engi
+                * and correct overselves later when updating from HW.
+                */
+               if (*prev)
+-                      lrc_runtime_stop((*prev)->context);
++                      prev_ce = (*prev)->context;
+               if (*execlists->active)
+-                      lrc_runtime_start((*execlists->active)->context);
++                      active_ce = (*execlists->active)->context;
++              if (prev_ce != active_ce) {
++                      if (prev_ce)
++                              lrc_runtime_stop(prev_ce);
++                      if (active_ce)
++                              lrc_runtime_start(active_ce);
++              }
+               new_timeslice(execlists);
+       }
diff --git a/queue-6.2/drm-i915-fix-race-condition-uaf-in-i915_perf_add_config_ioctl.patch b/queue-6.2/drm-i915-fix-race-condition-uaf-in-i915_perf_add_config_ioctl.patch
new file mode 100644 (file)
index 0000000..da535cc
--- /dev/null
@@ -0,0 +1,48 @@
+From dc30c011469165d57af9adac5baff7d767d20e5c Mon Sep 17 00:00:00 2001
+From: Min Li <lm0963hack@gmail.com>
+Date: Tue, 28 Mar 2023 17:36:27 +0800
+Subject: drm/i915: fix race condition UAF in i915_perf_add_config_ioctl
+
+From: Min Li <lm0963hack@gmail.com>
+
+commit dc30c011469165d57af9adac5baff7d767d20e5c upstream.
+
+Userspace can guess the id value and try to race oa_config object creation
+with config remove, resulting in a use-after-free if we dereference the
+object after unlocking the metrics_lock.  For that reason, unlocking the
+metrics_lock must be done after we are done dereferencing the object.
+
+Signed-off-by: Min Li <lm0963hack@gmail.com>
+Fixes: f89823c21224 ("drm/i915/perf: Implement I915_PERF_ADD/REMOVE_CONFIG interface")
+Cc: <stable@vger.kernel.org> # v4.14+
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Reviewed-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230328093627.5067-1-lm0963hack@gmail.com
+[tursulin: Manually added stable tag.]
+(cherry picked from commit 49f6f6483b652108bcb73accd0204a464b922395)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_perf.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -4612,13 +4612,13 @@ int i915_perf_add_config_ioctl(struct dr
+               err = oa_config->id;
+               goto sysfs_err;
+       }
+-
+-      mutex_unlock(&perf->metrics_lock);
++      id = oa_config->id;
+       drm_dbg(&perf->i915->drm,
+               "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
++      mutex_unlock(&perf->metrics_lock);
+-      return oa_config->id;
++      return id;
+ sysfs_err:
+       mutex_unlock(&perf->metrics_lock);
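
The fix above follows a general rule: any field of an object that other
threads may free must be copied into a local variable while the protecting
lock is still held, and only the local copy may be used after unlocking.
A minimal user-space sketch of that pattern, with purely illustrative names
(this is not the i915 code):

/* Copy fields out of a shared object before unlocking, so that a
 * concurrent remover freeing the object cannot cause a use-after-free.
 * All names here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct config {
        int id;
        char uuid[37];
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct config *registered;       /* protected by registry_lock */

static int add_config(void)
{
        struct config *cfg = calloc(1, sizeof(*cfg));
        int id;

        if (!cfg)
                return -1;

        pthread_mutex_lock(&registry_lock);
        cfg->id = 1;
        snprintf(cfg->uuid, sizeof(cfg->uuid), "example-uuid");
        registered = cfg;

        /* Copy everything we still need while the lock is held ... */
        id = cfg->id;
        printf("added config %s id=%d\n", cfg->uuid, cfg->id);
        pthread_mutex_unlock(&registry_lock);

        /* ... so after unlocking only the local copy is touched. */
        return id;
}

static void remove_config(void)
{
        pthread_mutex_lock(&registry_lock);
        free(registered);       /* a racing remover may free the object */
        registered = NULL;
        pthread_mutex_unlock(&registry_lock);
}

int main(void)
{
        int id = add_config();

        remove_config();
        printf("returned id %d, object already gone\n", id);
        return 0;
}
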
diff --git a/queue-6.2/drm-nouveau-disp-support-more-modes-by-checking-with-lower-bpc.patch b/queue-6.2/drm-nouveau-disp-support-more-modes-by-checking-with-lower-bpc.patch
new file mode 100644 (file)
index 0000000..f2b5d36
--- /dev/null
@@ -0,0 +1,96 @@
+From 7f67aa097e875c87fba024e850cf405342300059 Mon Sep 17 00:00:00 2001
+From: Karol Herbst <kherbst@redhat.com>
+Date: Fri, 31 Mar 2023 00:39:38 +0200
+Subject: drm/nouveau/disp: Support more modes by checking with lower bpc
+
+From: Karol Herbst <kherbst@redhat.com>
+
+commit 7f67aa097e875c87fba024e850cf405342300059 upstream.
+
+This allows us to advertise more modes especially on HDR displays.
+
+Fixes using 4K@60 modes on my TV and main display, both using an HDMI to DP
+adapter. Also fixes similar issues for other users running into this.
+
+Cc: stable@vger.kernel.org # 5.10+
+Signed-off-by: Karol Herbst <kherbst@redhat.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230330223938.4025569-1-kherbst@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/dispnv50/disp.c |   32 ++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/nouveau/nouveau_dp.c    |    8 +++++---
+ 2 files changed, 37 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -363,6 +363,35 @@ nv50_outp_atomic_check_view(struct drm_e
+       return 0;
+ }
++static void
++nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
++{
++      struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
++      struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++      struct drm_display_mode *mode = &asyh->state.adjusted_mode;
++      unsigned int max_rate, mode_rate;
++
++      switch (nv_encoder->dcb->type) {
++      case DCB_OUTPUT_DP:
++              max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
++
++              /* we don't support more than 10 anyway */
++              asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
++
++              /* reduce the bpc until it works out */
++              while (asyh->or.bpc > 6) {
++                      mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
++                      if (mode_rate <= max_rate)
++                              break;
++
++                      asyh->or.bpc -= 2;
++              }
++              break;
++      default:
++              break;
++      }
++}
++
+ static int
+ nv50_outp_atomic_check(struct drm_encoder *encoder,
+                      struct drm_crtc_state *crtc_state,
+@@ -381,6 +410,9 @@ nv50_outp_atomic_check(struct drm_encode
+       if (crtc_state->mode_changed || crtc_state->connectors_changed)
+               asyh->or.bpc = connector->display_info.bpc;
++      /* We might have to reduce the bpc */
++      nv50_outp_atomic_fix_depth(encoder, crtc_state);
++
+       return 0;
+ }
+--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
+@@ -263,8 +263,6 @@ nouveau_dp_irq(struct work_struct *work)
+ }
+ /* TODO:
+- * - Use the minimum possible BPC here, once we add support for the max bpc
+- *   property.
+  * - Validate against the DP caps advertised by the GPU (we don't check these
+  *   yet)
+  */
+@@ -276,7 +274,11 @@ nv50_dp_mode_valid(struct drm_connector
+ {
+       const unsigned int min_clock = 25000;
+       unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
+-      const u8 bpp = connector->display_info.bpc * 3;
++      /* Check with the minmum bpc always, so we can advertise better modes.
++       * In particlar not doing this causes modes to be dropped on HDR
++       * displays as we might check with a bpc of 16 even.
++       */
++      const u8 bpp = 6 * 3;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
+               return MODE_NO_INTERLACE;
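
To see why validating with the lowest bpc admits more modes, plug numbers
into the mode_rate formula used above (DIV_ROUND_UP(clock * bpc * 3, 8)).
A small stand-alone sketch; the max_rate figure below is an assumption for
illustration, real values come from the encoder's DP link configuration:

/* Worked example of the bandwidth check: a 4K@60 CVT-RB mode fits an
 * assumed link capacity only when checked at the minimum bpc.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int clock = 533250;     /* 4K@60 CVT-RB pixel clock, kHz */
        unsigned int max_rate = 1500000; /* hypothetical link capacity */
        unsigned int bpc;

        for (bpc = 16; bpc >= 6; bpc -= 2) {
                unsigned int mode_rate = DIV_ROUND_UP(clock * bpc * 3, 8);

                printf("bpc %2u -> mode_rate %7u (%s max_rate %u)\n",
                       bpc, mode_rate,
                       mode_rate <= max_rate ? "fits" : "exceeds", max_rate);
        }
        /* Only the bpc = 6 rate (about 1.2M) fits the assumed 1.5M link, so
         * checking at bpc 16 would have rejected a perfectly usable mode. */
        return 0;
}
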
diff --git a/queue-6.2/drm-panfrost-fix-the-panfrost_mmu_map_fault_addr-error-path.patch b/queue-6.2/drm-panfrost-fix-the-panfrost_mmu_map_fault_addr-error-path.patch
new file mode 100644 (file)
index 0000000..034c275
--- /dev/null
@@ -0,0 +1,33 @@
+From 764a2ab9eb56e1200083e771aab16186836edf1d Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@collabora.com>
+Date: Fri, 21 May 2021 11:38:11 +0200
+Subject: drm/panfrost: Fix the panfrost_mmu_map_fault_addr() error path
+
+From: Boris Brezillon <boris.brezillon@collabora.com>
+
+commit 764a2ab9eb56e1200083e771aab16186836edf1d upstream.
+
+Make sure all bo->base.pages entries are either NULL or pointing to a
+valid page before calling drm_gem_shmem_put_pages().
+
+Reported-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
+Cc: <stable@vger.kernel.org>
+Fixes: 187d2929206e ("drm/panfrost: Add support for GPU heap allocations")
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Reviewed-by: Steven Price <steven.price@arm.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210521093811.1018992-1-boris.brezillon@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/panfrost/panfrost_mmu.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -504,6 +504,7 @@ static int panfrost_mmu_map_fault_addr(s
+               if (IS_ERR(pages[i])) {
+                       mutex_unlock(&bo->base.pages_lock);
+                       ret = PTR_ERR(pages[i]);
++                      pages[i] = NULL;
+                       goto err_pages;
+               }
+       }
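
The rule behind this one-liner is common to any partially filled allocation
array: a slot holding an error cookie must be reset before the shared
cleanup path runs, so cleanup only ever sees NULL or valid entries. A small
stand-alone sketch of the pattern (the names and the ERR_SLOT sentinel are
illustrative, not the panfrost structures):

/* Clean up a partially filled array: the failing slot is reset to NULL so
 * the error path can free "everything non-NULL" without tripping over an
 * error-encoded pointer (the kernel's ERR_PTR, modelled here as ERR_SLOT).
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 8
#define ERR_SLOT ((void *)-1)   /* stand-in for an ERR_PTR() value */

static void *alloc_slot(int i)
{
        return i == 5 ? ERR_SLOT : malloc(64);  /* simulate failure at slot 5 */
}

int main(void)
{
        void *slots[NR_SLOTS] = { NULL };
        int i, ret = 0;

        for (i = 0; i < NR_SLOTS; i++) {
                slots[i] = alloc_slot(i);
                if (slots[i] == ERR_SLOT) {
                        ret = -1;
                        slots[i] = NULL;  /* keep the array NULL-or-valid */
                        goto err_slots;
                }
        }
        printf("all slots allocated\n");
err_slots:
        for (i = 0; i < NR_SLOTS; i++)
                free(slots[i]);           /* free() tolerates NULL */
        return ret;
}
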
diff --git a/queue-6.2/maple_tree-fix-a-potential-concurrency-bug-in-rcu-mode.patch b/queue-6.2/maple_tree-fix-a-potential-concurrency-bug-in-rcu-mode.patch
new file mode 100644 (file)
index 0000000..85e43dc
--- /dev/null
@@ -0,0 +1,82 @@
+From c45ea315a602d45569b08b93e9ab30f6a63a38aa Mon Sep 17 00:00:00 2001
+From: Peng Zhang <zhangpeng.00@bytedance.com>
+Date: Tue, 14 Mar 2023 20:42:03 +0800
+Subject: maple_tree: fix a potential concurrency bug in RCU mode
+
+From: Peng Zhang <zhangpeng.00@bytedance.com>
+
+commit c45ea315a602d45569b08b93e9ab30f6a63a38aa upstream.
+
+There is a concurrency bug that may cause the wrong value to be loaded
+when a CPU is modifying the maple tree.
+
+CPU1:
+mtree_insert_range()
+  mas_insert()
+    mas_store_root()
+      ...
+      mas_root_expand()
+        ...
+        rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+        ma_set_meta(node, maple_leaf_64, 0, slot);    <---IP
+
+CPU2:
+mtree_load()
+  mtree_lookup_walk()
+    ma_data_end();
+
+When CPU1 is about to execute the instruction pointed to by IP, the
+ma_data_end() executed by CPU2 may return the wrong end position, which
+will cause the value loaded by mtree_load() to be wrong.
+
+An example of triggering the bug:
+
+Add mdelay(100) between rcu_assign_pointer() and ma_set_meta() in
+mas_root_expand().
+
+static DEFINE_MTREE(tree);
+int work(void *p) {
+       unsigned long val;
+       for (int i = 0 ; i< 30; ++i) {
+               val = (unsigned long)mtree_load(&tree, 8);
+               mdelay(5);
+               pr_info("%lu",val);
+       }
+       return 0;
+}
+
+mt_init_flags(&tree, MT_FLAGS_USE_RCU);
+mtree_insert(&tree, 0, (void*)12345, GFP_KERNEL);
+run_thread(work)
+mtree_insert(&tree, 1, (void*)56789, GFP_KERNEL);
+
+In RCU mode, mtree_load() should always return the value from either before
+or after the data structure is modified, and in this example mtree_load(&tree, 8)
+may return 56789, which is not expected; it should always return NULL.  Fix
+it by putting ma_set_meta() before rcu_assign_pointer().
+
+Link: https://lkml.kernel.org/r/20230314124203.91572-4-zhangpeng.00@bytedance.com
+Fixes: 54a611b60590 ("Maple Tree: add new data structure")
+Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/maple_tree.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -3659,10 +3659,9 @@ static inline int mas_root_expand(struct
+               slot++;
+       mas->depth = 1;
+       mas_set_height(mas);
+-
++      ma_set_meta(node, maple_leaf_64, 0, slot);
+       /* swap the new root into the tree */
+       rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+-      ma_set_meta(node, maple_leaf_64, 0, slot);
+       return slot;
+ }
diff --git a/queue-6.2/maple_tree-fix-get-wrong-data_end-in-mtree_lookup_walk.patch b/queue-6.2/maple_tree-fix-get-wrong-data_end-in-mtree_lookup_walk.patch
new file mode 100644 (file)
index 0000000..c1af081
--- /dev/null
@@ -0,0 +1,56 @@
+From ec07967d7523adb3670f9dfee0232e3bc868f3de Mon Sep 17 00:00:00 2001
+From: Peng Zhang <zhangpeng.00@bytedance.com>
+Date: Tue, 14 Mar 2023 20:42:01 +0800
+Subject: maple_tree: fix get wrong data_end in mtree_lookup_walk()
+
+From: Peng Zhang <zhangpeng.00@bytedance.com>
+
+commit ec07967d7523adb3670f9dfee0232e3bc868f3de upstream.
+
+if (likely(offset > end))
+       max = pivots[offset];
+
+The above code should be changed to if (likely(offset < end)), which is
+correct.  This affects the correctness of ma_data_end().  It seems that
+the final result will not actually be wrong, but it is best to change it.
+Rather than changing the condition in place, this patch simplifies the
+code instead.
+
+Link: https://lkml.kernel.org/r/20230314124203.91572-1-zhangpeng.00@bytedance.com
+Link: https://lkml.kernel.org/r/20230314124203.91572-2-zhangpeng.00@bytedance.com
+Fixes: 54a611b60590 ("Maple Tree: add new data structure")
+Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/maple_tree.c |   15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -3875,18 +3875,13 @@ static inline void *mtree_lookup_walk(st
+               end = ma_data_end(node, type, pivots, max);
+               if (unlikely(ma_dead_node(node)))
+                       goto dead_node;
+-
+-              if (pivots[offset] >= mas->index)
+-                      goto next;
+-
+               do {
+-                      offset++;
+-              } while ((offset < end) && (pivots[offset] < mas->index));
+-
+-              if (likely(offset > end))
+-                      max = pivots[offset];
++                      if (pivots[offset] >= mas->index) {
++                              max = pivots[offset];
++                              break;
++                      }
++              } while (++offset < end);
+-next:
+               slots = ma_slots(node, type);
+               next = mt_slot(mas->tree, slots, offset);
+               if (unlikely(ma_dead_node(node)))
diff --git a/queue-6.2/mm-hugetlb-fix-uffd-wr-protection-for-cow-optimization-path.patch b/queue-6.2/mm-hugetlb-fix-uffd-wr-protection-for-cow-optimization-path.patch
new file mode 100644 (file)
index 0000000..346bffc
--- /dev/null
@@ -0,0 +1,88 @@
+From 60d5b473d61be61ac315e544fcd6a8234a79500e Mon Sep 17 00:00:00 2001
+From: Peter Xu <peterx@redhat.com>
+Date: Tue, 21 Mar 2023 15:18:40 -0400
+Subject: mm/hugetlb: fix uffd wr-protection for CoW optimization path
+
+From: Peter Xu <peterx@redhat.com>
+
+commit 60d5b473d61be61ac315e544fcd6a8234a79500e upstream.
+
+This patch fixes an issue that a hugetlb uffd-wr-protected mapping can be
+writable even with uffd-wp bit set.  It only happens with hugetlb private
+mappings, when someone first wr-protects a missing pte (which installs
+a pte marker) and then writes to the same page without any prior
+access to the page.
+
+Userfaultfd-wp trap for hugetlb was implemented in hugetlb_fault() before
+reaching hugetlb_wp() to avoid taking more locks that userfault won't
+need.  However there's one CoW optimization path that can trigger
+hugetlb_wp() inside hugetlb_no_page(), which will bypass the trap.
+
+This patch skips hugetlb_wp() for CoW and retries the fault if uffd-wp bit
+is detected.  The new path will only trigger in the CoW optimization path
+because generic hugetlb_fault() (e.g.  when a present pte was
+wr-protected) will resolve the uffd-wp bit already.  Also make sure
+anonymous UNSHARE won't be affected and can still be resolved, IOW only
+skip CoW not CoR.
+
+This patch will be needed for v5.19+ hence copy stable.
+
+[peterx@redhat.com: v2]
+  Link: https://lkml.kernel.org/r/ZBzOqwF2wrHgBVZb@x1n
+[peterx@redhat.com: v3]
+  Link: https://lkml.kernel.org/r/20230324142620.2344140-1-peterx@redhat.com
+Link: https://lkml.kernel.org/r/20230321191840.1897940-1-peterx@redhat.com
+Fixes: 166f3ecc0daf ("mm/hugetlb: hook page faults for uffd write protection")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Reported-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Tested-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Nadav Amit <nadav.amit@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5476,7 +5476,7 @@ static vm_fault_t hugetlb_wp(struct mm_s
+                      struct page *pagecache_page, spinlock_t *ptl)
+ {
+       const bool unshare = flags & FAULT_FLAG_UNSHARE;
+-      pte_t pte;
++      pte_t pte = huge_ptep_get(ptep);
+       struct hstate *h = hstate_vma(vma);
+       struct page *old_page, *new_page;
+       int outside_reserve = 0;
+@@ -5485,6 +5485,17 @@ static vm_fault_t hugetlb_wp(struct mm_s
+       struct mmu_notifier_range range;
+       /*
++       * Never handle CoW for uffd-wp protected pages.  It should be only
++       * handled when the uffd-wp protection is removed.
++       *
++       * Note that only the CoW optimization path (in hugetlb_no_page())
++       * can trigger this, because hugetlb_fault() will always resolve
++       * uffd-wp bit first.
++       */
++      if (!unshare && huge_pte_uffd_wp(pte))
++              return 0;
++
++      /*
+        * hugetlb does not support FOLL_FORCE-style write faults that keep the
+        * PTE mapped R/O such as maybe_mkwrite() would do.
+        */
+@@ -5497,7 +5508,6 @@ static vm_fault_t hugetlb_wp(struct mm_s
+               return 0;
+       }
+-      pte = huge_ptep_get(ptep);
+       old_page = pte_page(pte);
+       delayacct_wpcopy_start();
diff --git a/queue-6.2/mm-swap-fix-swap_info_struct-race-between-swapoff-and-get_swap_pages.patch b/queue-6.2/mm-swap-fix-swap_info_struct-race-between-swapoff-and-get_swap_pages.patch
new file mode 100644 (file)
index 0000000..dca737c
--- /dev/null
@@ -0,0 +1,119 @@
+From 6fe7d6b992113719e96744d974212df3fcddc76c Mon Sep 17 00:00:00 2001
+From: Rongwei Wang <rongwei.wang@linux.alibaba.com>
+Date: Tue, 4 Apr 2023 23:47:16 +0800
+Subject: mm/swap: fix swap_info_struct race between swapoff and get_swap_pages()
+
+From: Rongwei Wang <rongwei.wang@linux.alibaba.com>
+
+commit 6fe7d6b992113719e96744d974212df3fcddc76c upstream.
+
+The si->lock must be held when deleting the si from the available list.
+Otherwise, another thread can re-add the si to the available list, which
+can lead to memory corruption.  The only place we have found where this
+happens is in the swapoff path.  This case can be described as below:
+
+core 0                       core 1
+swapoff
+
+del_from_avail_list(si)      waiting
+
+try lock si->lock            acquire swap_avail_lock
+                             and re-add si into
+                             swap_avail_head
+
+acquires si->lock but misses that si has already been added again, and
+continues to clear SWP_WRITEOK, etc.
+
+It can easily be seen that massive numbers of warning messages can be
+triggered inside get_swap_pages() in some special cases, for example by
+calling madvise(MADV_PAGEOUT) on blocks of touched memory concurrently
+while running many swapon-swapoff operations (e.g.  stress-ng-swap).
+
+However, in the worst case the above scenario can cause a panic.  In
+swapoff(), the memory used by si could be kept in swap_info[] after
+turning off a swap.  This means the memory corruption does not show up
+immediately, but only once the entry is allocated and reset for a new
+swap in the swapon path.  A panic message this caused (with
+CONFIG_PLIST_DEBUG enabled):
+
+------------[ cut here ]------------
+top: 00000000e58a3003, n: 0000000013e75cda, p: 000000008cd4451a
+prev: 0000000035b1e58a, n: 000000008cd4451a, p: 000000002150ee8d
+next: 000000008cd4451a, n: 000000008cd4451a, p: 000000008cd4451a
+WARNING: CPU: 21 PID: 1843 at lib/plist.c:60 plist_check_prev_next_node+0x50/0x70
+Modules linked in: rfkill(E) crct10dif_ce(E)...
+CPU: 21 PID: 1843 Comm: stress-ng Kdump: ... 5.10.134+
+Hardware name: Alibaba Cloud ECS, BIOS 0.0.0 02/06/2015
+pstate: 60400005 (nZCv daif +PAN -UAO -TCO BTYPE=--)
+pc : plist_check_prev_next_node+0x50/0x70
+lr : plist_check_prev_next_node+0x50/0x70
+sp : ffff0018009d3c30
+x29: ffff0018009d3c40 x28: ffff800011b32a98
+x27: 0000000000000000 x26: ffff001803908000
+x25: ffff8000128ea088 x24: ffff800011b32a48
+x23: 0000000000000028 x22: ffff001800875c00
+x21: ffff800010f9e520 x20: ffff001800875c00
+x19: ffff001800fdc6e0 x18: 0000000000000030
+x17: 0000000000000000 x16: 0000000000000000
+x15: 0736076307640766 x14: 0730073007380731
+x13: 0736076307640766 x12: 0730073007380731
+x11: 000000000004058d x10: 0000000085a85b76
+x9 : ffff8000101436e4 x8 : ffff800011c8ce08
+x7 : 0000000000000000 x6 : 0000000000000001
+x5 : ffff0017df9ed338 x4 : 0000000000000001
+x3 : ffff8017ce62a000 x2 : ffff0017df9ed340
+x1 : 0000000000000000 x0 : 0000000000000000
+Call trace:
+ plist_check_prev_next_node+0x50/0x70
+ plist_check_head+0x80/0xf0
+ plist_add+0x28/0x140
+ add_to_avail_list+0x9c/0xf0
+ _enable_swap_info+0x78/0xb4
+ __do_sys_swapon+0x918/0xa10
+ __arm64_sys_swapon+0x20/0x30
+ el0_svc_common+0x8c/0x220
+ do_el0_svc+0x2c/0x90
+ el0_svc+0x1c/0x30
+ el0_sync_handler+0xa8/0xb0
+ el0_sync+0x148/0x180
+irq event stamp: 2082270
+
+Now, si->lock is taken before calling 'del_from_avail_list()' to make sure
+other threads see that the si has been deleted with SWP_WRITEOK cleared
+together, and will not reinsert it again.
+
+This problem exists in versions after stable 5.10.y.
+
+Link: https://lkml.kernel.org/r/20230404154716.23058-1-rongwei.wang@linux.alibaba.com
+Fixes: a2468cc9bfdff ("swap: choose swap device according to numa node")
+Tested-by: Yongchen Yin <wb-yyc939293@alibaba-inc.com>
+Signed-off-by: Rongwei Wang <rongwei.wang@linux.alibaba.com>
+Cc: Bagas Sanjaya <bagasdotme@gmail.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Aaron Lu <aaron.lu@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/swapfile.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -679,6 +679,7 @@ static void __del_from_avail_list(struct
+ {
+       int nid;
++      assert_spin_locked(&p->lock);
+       for_each_node(nid)
+               plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
+ }
+@@ -2435,8 +2436,8 @@ SYSCALL_DEFINE1(swapoff, const char __us
+               spin_unlock(&swap_lock);
+               goto out_dput;
+       }
+-      del_from_avail_list(p);
+       spin_lock(&p->lock);
++      del_from_avail_list(p);
+       if (p->prio < 0) {
+               struct swap_info_struct *si = p;
+               int nid;
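
Stated compactly, the fix turns removal from the available list and the
clearing of SWP_WRITEOK into a single critical section under si->lock, so a
concurrent re-adder serialised on the same lock cannot slip in between. A
minimal pthread sketch of that ordering, using hypothetical names rather
than the swap code:

/* Both the list removal and the flag clear happen under the entry lock,
 * so a re-adder that also takes the lock either sees the entry still
 * writable (and may re-add it before the disable) or sees the flag
 * cleared (and leaves it off the list).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
        pthread_mutex_t lock;
        bool on_avail_list;
        bool writeok;
};

static void disable_entry(struct entry *e)
{
        pthread_mutex_lock(&e->lock);
        e->on_avail_list = false;  /* previously done before taking the lock */
        e->writeok = false;        /* now atomic with the removal */
        pthread_mutex_unlock(&e->lock);
}

static void maybe_readd_entry(struct entry *e)
{
        pthread_mutex_lock(&e->lock);
        if (e->writeok)            /* re-add only if still usable */
                e->on_avail_list = true;
        pthread_mutex_unlock(&e->lock);
}

int main(void)
{
        struct entry e = { PTHREAD_MUTEX_INITIALIZER, true, true };

        disable_entry(&e);
        maybe_readd_entry(&e);
        printf("on_avail_list=%d writeok=%d\n", e.on_avail_list, e.writeok);
        return 0;
}
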
diff --git a/queue-6.2/mm-vmalloc-avoid-warn_alloc-noise-caused-by-fatal-signal.patch b/queue-6.2/mm-vmalloc-avoid-warn_alloc-noise-caused-by-fatal-signal.patch
new file mode 100644 (file)
index 0000000..799fde2
--- /dev/null
@@ -0,0 +1,122 @@
+From f349b15e183d6956f1b63d6ff57849ff10c7edd5 Mon Sep 17 00:00:00 2001
+From: Yafang Shao <laoar.shao@gmail.com>
+Date: Thu, 30 Mar 2023 16:26:25 +0000
+Subject: mm: vmalloc: avoid warn_alloc noise caused by fatal signal
+
+From: Yafang Shao <laoar.shao@gmail.com>
+
+commit f349b15e183d6956f1b63d6ff57849ff10c7edd5 upstream.
+
+There are some suspicious warn_alloc reports on my test server, for example,
+
+[13366.518837] warn_alloc: 81 callbacks suppressed
+[13366.518841] test_verifier: vmalloc error: size 4096, page order 0, failed to allocate pages, mode:0x500dc2(GFP_HIGHUSER|__GFP_ZERO|__GFP_ACCOUNT), nodemask=(null),cpuset=/,mems_allowed=0-1
+[13366.522240] CPU: 30 PID: 722463 Comm: test_verifier Kdump: loaded Tainted: G        W  O       6.2.0+ #638
+[13366.524216] Call Trace:
+[13366.524702]  <TASK>
+[13366.525148]  dump_stack_lvl+0x6c/0x80
+[13366.525712]  dump_stack+0x10/0x20
+[13366.526239]  warn_alloc+0x119/0x190
+[13366.526783]  ? alloc_pages_bulk_array_mempolicy+0x9e/0x2a0
+[13366.527470]  __vmalloc_area_node+0x546/0x5b0
+[13366.528066]  __vmalloc_node_range+0xc2/0x210
+[13366.528660]  __vmalloc_node+0x42/0x50
+[13366.529186]  ? bpf_prog_realloc+0x53/0xc0
+[13366.529743]  __vmalloc+0x1e/0x30
+[13366.530235]  bpf_prog_realloc+0x53/0xc0
+[13366.530771]  bpf_patch_insn_single+0x80/0x1b0
+[13366.531351]  bpf_jit_blind_constants+0xe9/0x1c0
+[13366.531932]  ? __free_pages+0xee/0x100
+[13366.532457]  ? free_large_kmalloc+0x58/0xb0
+[13366.533002]  bpf_int_jit_compile+0x8c/0x5e0
+[13366.533546]  bpf_prog_select_runtime+0xb4/0x100
+[13366.534108]  bpf_prog_load+0x6b1/0xa50
+[13366.534610]  ? perf_event_task_tick+0x96/0xb0
+[13366.535151]  ? security_capable+0x3a/0x60
+[13366.535663]  __sys_bpf+0xb38/0x2190
+[13366.536120]  ? kvm_clock_get_cycles+0x9/0x10
+[13366.536643]  __x64_sys_bpf+0x1c/0x30
+[13366.537094]  do_syscall_64+0x38/0x90
+[13366.537554]  entry_SYSCALL_64_after_hwframe+0x72/0xdc
+[13366.538107] RIP: 0033:0x7f78310f8e29
+[13366.538561] Code: 01 00 48 81 c4 80 00 00 00 e9 f1 fe ff ff 0f 1f 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 17 e0 2c 00 f7 d8 64 89 01 48
+[13366.540286] RSP: 002b:00007ffe2a61fff8 EFLAGS: 00000206 ORIG_RAX: 0000000000000141
+[13366.541031] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f78310f8e29
+[13366.541749] RDX: 0000000000000080 RSI: 00007ffe2a6200b0 RDI: 0000000000000005
+[13366.542470] RBP: 00007ffe2a620010 R08: 00007ffe2a6202a0 R09: 00007ffe2a6200b0
+[13366.543183] R10: 00000000000f423e R11: 0000000000000206 R12: 0000000000407800
+[13366.543900] R13: 00007ffe2a620540 R14: 0000000000000000 R15: 0000000000000000
+[13366.544623]  </TASK>
+[13366.545260] Mem-Info:
+[13366.546121] active_anon:81319 inactive_anon:20733 isolated_anon:0
+ active_file:69450 inactive_file:5624 isolated_file:0
+ unevictable:0 dirty:10 writeback:0
+ slab_reclaimable:69649 slab_unreclaimable:48930
+ mapped:27400 shmem:12868 pagetables:4929
+ sec_pagetables:0 bounce:0
+ kernel_misc_reclaimable:0
+ free:15870308 free_pcp:142935 free_cma:0
+[13366.551886] Node 0 active_anon:224836kB inactive_anon:33528kB active_file:175692kB inactive_file:13752kB unevictable:0kB isolated(anon):0kB isolated(file):0kB mapped:59248kB dirty:32kB writeback:0kB shmem:18252kB shmem_thp: 0kB shmem_pmdmapped: 0kB anon_thp: 0kB writeback_tmp:0kB kernel_stack:4616kB pagetables:10664kB sec_pagetables:0kB all_unreclaimable? no
+[13366.555184] Node 1 active_anon:100440kB inactive_anon:49404kB active_file:102108kB inactive_file:8744kB unevictable:0kB isolated(anon):0kB isolated(file):0kB mapped:50352kB dirty:8kB writeback:0kB shmem:33220kB shmem_thp: 0kB shmem_pmdmapped: 0kB anon_thp: 0kB writeback_tmp:0kB kernel_stack:3896kB pagetables:9052kB sec_pagetables:0kB all_unreclaimable? no
+[13366.558262] Node 0 DMA free:15360kB boost:0kB min:304kB low:380kB high:456kB reserved_highatomic:0KB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB writepending:0kB present:15992kB managed:15360kB mlocked:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB
+[13366.560821] lowmem_reserve[]: 0 2735 31873 31873 31873
+[13366.561981] Node 0 DMA32 free:2790904kB boost:0kB min:56028kB low:70032kB high:84036kB reserved_highatomic:0KB active_anon:1936kB inactive_anon:20kB active_file:396kB inactive_file:344kB unevictable:0kB writepending:0kB present:3129200kB managed:2801520kB mlocked:0kB bounce:0kB free_pcp:5188kB local_pcp:0kB free_cma:0kB
+[13366.565148] lowmem_reserve[]: 0 0 29137 29137 29137
+[13366.566168] Node 0 Normal free:28533824kB boost:0kB min:596740kB low:745924kB high:895108kB reserved_highatomic:28672KB active_anon:222900kB inactive_anon:33508kB active_file:175296kB inactive_file:13408kB unevictable:0kB writepending:32kB present:30408704kB managed:29837172kB mlocked:0kB bounce:0kB free_pcp:295724kB local_pcp:0kB free_cma:0kB
+[13366.569485] lowmem_reserve[]: 0 0 0 0 0
+[13366.570416] Node 1 Normal free:32141144kB boost:0kB min:660504kB low:825628kB high:990752kB reserved_highatomic:69632KB active_anon:100440kB inactive_anon:49404kB active_file:102108kB inactive_file:8744kB unevictable:0kB writepending:8kB present:33554432kB managed:33025372kB mlocked:0kB bounce:0kB free_pcp:270880kB local_pcp:46860kB free_cma:0kB
+[13366.573403] lowmem_reserve[]: 0 0 0 0 0
+[13366.574015] Node 0 DMA: 0*4kB 0*8kB 0*16kB 0*32kB 0*64kB 0*128kB 0*256kB 0*512kB 1*1024kB (U) 1*2048kB (M) 3*4096kB (M) = 15360kB
+[13366.575474] Node 0 DMA32: 782*4kB (UME) 756*8kB (UME) 736*16kB (UME) 745*32kB (UME) 694*64kB (UME) 653*128kB (UME) 595*256kB (UME) 552*512kB (UME) 454*1024kB (UME) 347*2048kB (UME) 246*4096kB (UME) = 2790904kB
+[13366.577442] Node 0 Normal: 33856*4kB (UMEH) 51815*8kB (UMEH) 42418*16kB (UMEH) 36272*32kB (UMEH) 22195*64kB (UMEH) 10296*128kB (UMEH) 7238*256kB (UMEH) 5638*512kB (UEH) 5337*1024kB (UMEH) 3506*2048kB (UMEH) 1470*4096kB (UME) = 28533784kB
+[13366.580460] Node 1 Normal: 15776*4kB (UMEH) 37485*8kB (UMEH) 29509*16kB (UMEH) 21420*32kB (UMEH) 14818*64kB (UMEH) 13051*128kB (UMEH) 9918*256kB (UMEH) 7374*512kB (UMEH) 5397*1024kB (UMEH) 3887*2048kB (UMEH) 2002*4096kB (UME) = 32141240kB
+[13366.583027] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=1048576kB
+[13366.584380] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB
+[13366.585702] Node 1 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=1048576kB
+[13366.587042] Node 1 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB
+[13366.588372] 87386 total pagecache pages
+[13366.589266] 0 pages in swap cache
+[13366.590327] Free swap  = 0kB
+[13366.591227] Total swap = 0kB
+[13366.592142] 16777082 pages RAM
+[13366.593057] 0 pages HighMem/MovableOnly
+[13366.594037] 357226 pages reserved
+[13366.594979] 0 pages hwpoisoned
+
+This failure really confused me, as there were still lots of available
+pages.  I finally figured out it was caused by a fatal signal.  When a
+process allocating memory via vm_area_alloc_pages() receives a fatal
+signal, it breaks out directly even if it hasn't allocated the requested
+pages.  In that case, we shouldn't show this warn_alloc, as it is
+useless.  We only need to show this warning when there really are not
+enough pages.
+
+Link: https://lkml.kernel.org/r/20230330162625.13604-1-laoar.shao@gmail.com
+Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
+Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/vmalloc.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -3070,9 +3070,11 @@ static void *__vmalloc_area_node(struct
+        * allocation request, free them via __vfree() if any.
+        */
+       if (area->nr_pages != nr_small_pages) {
+-              warn_alloc(gfp_mask, NULL,
+-                      "vmalloc error: size %lu, page order %u, failed to allocate pages",
+-                      area->nr_pages * PAGE_SIZE, page_order);
++              /* vm_area_alloc_pages() can also fail due to a fatal signal */
++              if (!fatal_signal_pending(current))
++                      warn_alloc(gfp_mask, NULL,
++                              "vmalloc error: size %lu, page order %u, failed to allocate pages",
++                              area->nr_pages * PAGE_SIZE, page_order);
+               goto fail;
+       }
diff --git a/queue-6.2/ring-buffer-fix-race-while-reader-and-writer-are-on-the-same-page.patch b/queue-6.2/ring-buffer-fix-race-while-reader-and-writer-are-on-the-same-page.patch
new file mode 100644 (file)
index 0000000..97c030b
--- /dev/null
@@ -0,0 +1,103 @@
+From 6455b6163d8c680366663cdb8c679514d55fc30c Mon Sep 17 00:00:00 2001
+From: Zheng Yejian <zhengyejian1@huawei.com>
+Date: Sat, 25 Mar 2023 10:12:47 +0800
+Subject: ring-buffer: Fix race while reader and writer are on the same page
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+commit 6455b6163d8c680366663cdb8c679514d55fc30c upstream.
+
+When a user reads the file 'trace_pipe', the kernel keeps printing the
+following logs that warn at "cpu_buffer->reader_page->read > rb_page_size(reader)"
+in rb_get_reader_page(). It just looks like there's an infinite loop in
+tracing_read_pipe(). This problem occurred several times on the arm64
+platform when testing v5.10 and below.
+
+  Call trace:
+   rb_get_reader_page+0x248/0x1300
+   rb_buffer_peek+0x34/0x160
+   ring_buffer_peek+0xbc/0x224
+   peek_next_entry+0x98/0xbc
+   __find_next_entry+0xc4/0x1c0
+   trace_find_next_entry_inc+0x30/0x94
+   tracing_read_pipe+0x198/0x304
+   vfs_read+0xb4/0x1e0
+   ksys_read+0x74/0x100
+   __arm64_sys_read+0x24/0x30
+   el0_svc_common.constprop.0+0x7c/0x1bc
+   do_el0_svc+0x2c/0x94
+   el0_svc+0x20/0x30
+   el0_sync_handler+0xb0/0xb4
+   el0_sync+0x160/0x180
+
+Then I dumped the vmcore and looked into the problematic per_cpu ring_buffer,
+and found that tail_page/commit_page/reader_page are on the same page while
+reader_page->read is obviously abnormal:
+  tail_page == commit_page == reader_page == {
+    .write = 0x100d20,
+    .read = 0x8f9f4805,  // Far greater than 0xd20, obviously abnormal!!!
+    .entries = 0x10004c,
+    .real_end = 0x0,
+    .page = {
+      .time_stamp = 0x857257416af0,
+      .commit = 0xd20,  // This page hasn't been full filled.
+      // .data[0...0xd20] seems normal.
+    }
+ }
+
+The root cause is most likely a race in which reader and writer are on the
+same page while the reader sees an event that has not been fully committed
+by the writer.
+
+To fix this, add memory barriers to make sure the reader can see the
+content of what is committed. Since commit a0fcaaed0c46 ("ring-buffer: Fix
+race between reset page and reading page") has added the read barrier in
+rb_get_reader_page(), here we just need to add the write barrier.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230325021247.2923907-1-zhengyejian1@huawei.com
+
+Cc: stable@vger.kernel.org
+Fixes: 77ae365eca89 ("ring-buffer: make lockless")
+Suggested-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c |   13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3102,6 +3102,10 @@ rb_set_commit_to_write(struct ring_buffe
+               if (RB_WARN_ON(cpu_buffer,
+                              rb_is_reader_page(cpu_buffer->tail_page)))
+                       return;
++              /*
++               * No need for a memory barrier here, as the update
++               * of the tail_page did it for this page.
++               */
+               local_set(&cpu_buffer->commit_page->page->commit,
+                         rb_page_write(cpu_buffer->commit_page));
+               rb_inc_page(&cpu_buffer->commit_page);
+@@ -3111,6 +3115,8 @@ rb_set_commit_to_write(struct ring_buffe
+       while (rb_commit_index(cpu_buffer) !=
+              rb_page_write(cpu_buffer->commit_page)) {
++              /* Make sure the readers see the content of what is committed. */
++              smp_wmb();
+               local_set(&cpu_buffer->commit_page->page->commit,
+                         rb_page_write(cpu_buffer->commit_page));
+               RB_WARN_ON(cpu_buffer,
+@@ -4688,7 +4694,12 @@ rb_get_reader_page(struct ring_buffer_pe
+       /*
+        * Make sure we see any padding after the write update
+-       * (see rb_reset_tail())
++       * (see rb_reset_tail()).
++       *
++       * In addition, a writer may be writing on the reader page
++       * if the page has not been fully filled, so the read barrier
++       * is also needed to make sure we see the content of what is
++       * committed by the writer (see rb_set_commit_to_write()).
+        */
+       smp_rmb();
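
The smp_wmb()/smp_rmb() pair added here is the classic publish/consume
discipline: the writer orders its data stores before the store that
announces them, and the reader orders the load of that announcement before
the loads of the data. A simplified user-space model of the pairing using
C11 fences (not the ring buffer's actual code):

/* Generic write/read barrier pairing: publish data, write barrier, then the
 * "committed" marker; the reader loads the marker, read barrier, then the
 * data. This mirrors the smp_wmb()/smp_rmb() pairing in a simplified form.
 */
#include <stdatomic.h>
#include <stdio.h>

static int payload;              /* plain data written before publishing */
static atomic_int committed;     /* plays the role of the commit marker */

static void writer(void)
{
        payload = 42;                                   /* fill the page */
        atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
        atomic_store_explicit(&committed, 1, memory_order_relaxed);
}

static int reader(void)
{
        if (!atomic_load_explicit(&committed, memory_order_relaxed))
                return -1;                              /* nothing committed */
        atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
        return payload;                                 /* sees 42 */
}

int main(void)
{
        writer();
        printf("reader sees %d\n", reader());
        return 0;
}
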
diff --git a/queue-6.2/series b/queue-6.2/series
index 0419af16b8acaab3943964a54ea0a38efcbe85a6..685448732eb4ed8769714878a5028d75422004b3 100644 (file)
--- a/queue-6.2/series
@@ -139,3 +139,20 @@ block-don-t-set-gd_need_part_scan-if-scan-partition-.patch
 perf-optimize-perf_pmu_migrate_context.patch
 perf-core-fix-the-same-task-check-in-perf_event_set_.patch
 tracing-synthetic-make-lastcmd_mutex-static.patch
+zsmalloc-document-freeable-stats.patch
+mm-vmalloc-avoid-warn_alloc-noise-caused-by-fatal-signal.patch
+wifi-mt76-mt7921-fix-fw-used-for-offload-check-for-mt7922.patch
+wifi-mt76-ignore-key-disable-commands.patch
+ublk-read-any-sqe-values-upfront.patch
+drm-panfrost-fix-the-panfrost_mmu_map_fault_addr-error-path.patch
+drm-nouveau-disp-support-more-modes-by-checking-with-lower-bpc.patch
+drm-i915-fix-context-runtime-accounting.patch
+drm-i915-fix-race-condition-uaf-in-i915_perf_add_config_ioctl.patch
+ring-buffer-fix-race-while-reader-and-writer-are-on-the-same-page.patch
+mm-swap-fix-swap_info_struct-race-between-swapoff-and-get_swap_pages.patch
+mm-hugetlb-fix-uffd-wr-protection-for-cow-optimization-path.patch
+maple_tree-fix-get-wrong-data_end-in-mtree_lookup_walk.patch
+maple_tree-fix-a-potential-concurrency-bug-in-rcu-mode.patch
+drm-amd-display-clear-mst-topology-if-it-fails-to-resume.patch
+drm-amdgpu-for-s0ix-skip-sdma-5.x-suspend-resume.patch
+drm-amdgpu-skip-psp-suspend-for-imu-enabled-asics-mode2-reset.patch
diff --git a/queue-6.2/ublk-read-any-sqe-values-upfront.patch b/queue-6.2/ublk-read-any-sqe-values-upfront.patch
new file mode 100644 (file)
index 0000000..6793fe1
--- /dev/null
@@ -0,0 +1,65 @@
+From 8c68ae3b22fa6fb2dbe83ef955ff10936503d28e Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 5 Apr 2023 20:00:46 -0600
+Subject: ublk: read any SQE values upfront
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 8c68ae3b22fa6fb2dbe83ef955ff10936503d28e upstream.
+
+Since SQE memory is shared with userspace, we should only be reading it
+once. We cannot read it multiple times, particularly when it's read once
+for validation and then read again for the actual use.
+
+ublk_ch_uring_cmd() is safe when called as a retry operation, as the
+memory backing is stable at that point. But for normal issue, we want
+to ensure that we only read ublksrv_io_cmd once. Wrap the function in
+a helper that reads the value into an on-stack copy of the struct.
+
+Cc: stable@vger.kernel.org # 6.0+
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/ublk_drv.c |   22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1202,9 +1202,10 @@ static void ublk_handle_need_get_data(st
+       ublk_queue_cmd(ubq, req);
+ }
+-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
++static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
++                             unsigned int issue_flags,
++                             struct ublksrv_io_cmd *ub_cmd)
+ {
+-      struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
+       struct ublk_device *ub = cmd->file->private_data;
+       struct ublk_queue *ubq;
+       struct ublk_io *io;
+@@ -1306,6 +1307,23 @@ static int ublk_ch_uring_cmd(struct io_u
+       return -EIOCBQUEUED;
+ }
++static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
++{
++      struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
++      struct ublksrv_io_cmd ub_cmd;
++
++      /*
++       * Not necessary for async retry, but let's keep it simple and always
++       * copy the values to avoid any potential reuse.
++       */
++      ub_cmd.q_id = READ_ONCE(ub_src->q_id);
++      ub_cmd.tag = READ_ONCE(ub_src->tag);
++      ub_cmd.result = READ_ONCE(ub_src->result);
++      ub_cmd.addr = READ_ONCE(ub_src->addr);
++
++      return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
++}
++
+ static const struct file_operations ublk_ch_fops = {
+       .owner = THIS_MODULE,
+       .open = ublk_ch_open,
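
The bug class addressed here is a double fetch from memory shared with
userspace: a field read once for validation and read again for use can be
changed in between. The fix snapshots the SQE fields into an on-stack
struct once. A user-space flavoured sketch of the same idea (the struct,
limits and READ_ONCE() definition below are illustrative stand-ins, not the
ublk ABI):

/* Avoid a double fetch from shared memory: snapshot every field once into
 * a private copy, then validate and use only the copy.
 */
#include <stdint.h>
#include <stdio.h>

struct shared_cmd {     /* lives in memory writable by the other side */
        uint16_t q_id;
        uint16_t tag;
        int32_t  result;
        uint64_t addr;
};

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static int handle_cmd(struct shared_cmd *shared)
{
        struct shared_cmd cmd;

        /* Snapshot once; later changes by the other side cannot desync the
         * validated values from the values actually used below. */
        cmd.q_id   = READ_ONCE(shared->q_id);
        cmd.tag    = READ_ONCE(shared->tag);
        cmd.result = READ_ONCE(shared->result);
        cmd.addr   = READ_ONCE(shared->addr);

        if (cmd.q_id > 64 || cmd.tag > 1024)
                return -1;      /* validation and use share one copy */

        printf("queue %u tag %u addr 0x%llx\n",
               (unsigned)cmd.q_id, (unsigned)cmd.tag,
               (unsigned long long)cmd.addr);
        return 0;
}

int main(void)
{
        struct shared_cmd c = { .q_id = 1, .tag = 7, .result = 0, .addr = 0x1000 };

        return handle_cmd(&c);
}
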
diff --git a/queue-6.2/wifi-mt76-ignore-key-disable-commands.patch b/queue-6.2/wifi-mt76-ignore-key-disable-commands.patch
new file mode 100644 (file)
index 0000000..fed4b88
--- /dev/null
@@ -0,0 +1,343 @@
+From e6db67fa871dee37d22701daba806bfcd4d9df49 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 30 Mar 2023 11:12:59 +0200
+Subject: wifi: mt76: ignore key disable commands
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit e6db67fa871dee37d22701daba806bfcd4d9df49 upstream.
+
+This helps avoid cleartext leakage of already queued or powersave buffered
+packets, when a reassoc triggers the key deletion.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20230330091259.61378-1-nbd@nbd.name
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7603/main.c   |   10 +--
+ drivers/net/wireless/mediatek/mt76/mt7615/mac.c    |   70 ++++++---------------
+ drivers/net/wireless/mediatek/mt76/mt7615/main.c   |   15 ++--
+ drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h |    6 -
+ drivers/net/wireless/mediatek/mt76/mt76x02_util.c  |   18 ++---
+ drivers/net/wireless/mediatek/mt76/mt7915/main.c   |   13 +--
+ drivers/net/wireless/mediatek/mt76/mt7921/main.c   |   13 +--
+ drivers/net/wireless/mediatek/mt76/mt7996/main.c   |   13 +--
+ 8 files changed, 62 insertions(+), 96 deletions(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+@@ -512,15 +512,15 @@ mt7603_set_key(struct ieee80211_hw *hw,
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               return -EOPNOTSUPP;
+-      if (cmd == SET_KEY) {
+-              key->hw_key_idx = wcid->idx;
+-              wcid->hw_key_idx = idx;
+-      } else {
++      if (cmd != SET_KEY) {
+               if (idx == wcid->hw_key_idx)
+                       wcid->hw_key_idx = -1;
+-              key = NULL;
++              return 0;
+       }
++
++      key->hw_key_idx = wcid->idx;
++      wcid->hw_key_idx = idx;
+       mt76_wcid_key_setup(&dev->mt76, wcid, key);
+       return mt7603_wtbl_set_key(dev, wcid->idx, key);
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1193,8 +1193,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtsc
+ static int
+ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+                          struct ieee80211_key_conf *key,
+-                         enum mt76_cipher_type cipher, u16 cipher_mask,
+-                         enum set_key_cmd cmd)
++                         enum mt76_cipher_type cipher, u16 cipher_mask)
+ {
+       u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
+       u8 data[32] = {};
+@@ -1203,27 +1202,18 @@ mt7615_mac_wtbl_update_key(struct mt7615
+               return -EINVAL;
+       mt76_rr_copy(dev, addr, data, sizeof(data));
+-      if (cmd == SET_KEY) {
+-              if (cipher == MT_CIPHER_TKIP) {
+-                      /* Rx/Tx MIC keys are swapped */
+-                      memcpy(data, key->key, 16);
+-                      memcpy(data + 16, key->key + 24, 8);
+-                      memcpy(data + 24, key->key + 16, 8);
+-              } else {
+-                      if (cipher_mask == BIT(cipher))
+-                              memcpy(data, key->key, key->keylen);
+-                      else if (cipher != MT_CIPHER_BIP_CMAC_128)
+-                              memcpy(data, key->key, 16);
+-                      if (cipher == MT_CIPHER_BIP_CMAC_128)
+-                              memcpy(data + 16, key->key, 16);
+-              }
++      if (cipher == MT_CIPHER_TKIP) {
++              /* Rx/Tx MIC keys are swapped */
++              memcpy(data, key->key, 16);
++              memcpy(data + 16, key->key + 24, 8);
++              memcpy(data + 24, key->key + 16, 8);
+       } else {
++              if (cipher_mask == BIT(cipher))
++                      memcpy(data, key->key, key->keylen);
++              else if (cipher != MT_CIPHER_BIP_CMAC_128)
++                      memcpy(data, key->key, 16);
+               if (cipher == MT_CIPHER_BIP_CMAC_128)
+-                      memset(data + 16, 0, 16);
+-              else if (cipher_mask)
+-                      memset(data, 0, 16);
+-              if (!cipher_mask)
+-                      memset(data, 0, sizeof(data));
++                      memcpy(data + 16, key->key, 16);
+       }
+       mt76_wr_copy(dev, addr, data, sizeof(data));
+@@ -1234,7 +1224,7 @@ mt7615_mac_wtbl_update_key(struct mt7615
+ static int
+ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+                         enum mt76_cipher_type cipher, u16 cipher_mask,
+-                        int keyidx, enum set_key_cmd cmd)
++                        int keyidx)
+ {
+       u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
+@@ -1253,9 +1243,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_
+       else
+               w0 &= ~MT_WTBL_W0_RX_IK_VALID;
+-      if (cmd == SET_KEY &&
+-          (cipher != MT_CIPHER_BIP_CMAC_128 ||
+-           cipher_mask == BIT(cipher))) {
++      if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) {
+               w0 &= ~MT_WTBL_W0_KEY_IDX;
+               w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
+       }
+@@ -1272,19 +1260,10 @@ mt7615_mac_wtbl_update_pk(struct mt7615_
+ static void
+ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-                            enum mt76_cipher_type cipher, u16 cipher_mask,
+-                            enum set_key_cmd cmd)
++                            enum mt76_cipher_type cipher, u16 cipher_mask)
+ {
+       u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
+-      if (!cipher_mask) {
+-              mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
+-              return;
+-      }
+-
+-      if (cmd != SET_KEY)
+-              return;
+-
+       if (cipher == MT_CIPHER_BIP_CMAC_128 &&
+           cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
+               return;
+@@ -1295,8 +1274,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7
+ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+                             struct mt76_wcid *wcid,
+-                            struct ieee80211_key_conf *key,
+-                            enum set_key_cmd cmd)
++                            struct ieee80211_key_conf *key)
+ {
+       enum mt76_cipher_type cipher;
+       u16 cipher_mask = wcid->cipher;
+@@ -1306,19 +1284,14 @@ int __mt7615_mac_wtbl_set_key(struct mt7
+       if (cipher == MT_CIPHER_NONE)
+               return -EOPNOTSUPP;
+-      if (cmd == SET_KEY)
+-              cipher_mask |= BIT(cipher);
+-      else
+-              cipher_mask &= ~BIT(cipher);
+-
+-      mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
+-      err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
+-                                       cmd);
++      cipher_mask |= BIT(cipher);
++      mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask);
++      err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask);
+       if (err < 0)
+               return err;
+       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
+-                                      key->keyidx, cmd);
++                                      key->keyidx);
+       if (err < 0)
+               return err;
+@@ -1329,13 +1302,12 @@ int __mt7615_mac_wtbl_set_key(struct mt7
+ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+                           struct mt76_wcid *wcid,
+-                          struct ieee80211_key_conf *key,
+-                          enum set_key_cmd cmd)
++                          struct ieee80211_key_conf *key)
+ {
+       int err;
+       spin_lock_bh(&dev->mt76.lock);
+-      err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
++      err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
+       spin_unlock_bh(&dev->mt76.lock);
+       return err;
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -391,18 +391,17 @@ static int mt7615_set_key(struct ieee802
+       if (cmd == SET_KEY)
+               *wcid_keyidx = idx;
+-      else if (idx == *wcid_keyidx)
+-              *wcid_keyidx = -1;
+-      else
++      else {
++              if (idx == *wcid_keyidx)
++                      *wcid_keyidx = -1;
+               goto out;
++      }
+-      mt76_wcid_key_setup(&dev->mt76, wcid,
+-                          cmd == SET_KEY ? key : NULL);
+-
++      mt76_wcid_key_setup(&dev->mt76, wcid, key);
+       if (mt76_is_mmio(&dev->mt76))
+-              err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
++              err = mt7615_mac_wtbl_set_key(dev, wcid, key);
+       else
+-              err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
++              err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
+ out:
+       mt7615_mutex_release(dev);
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+@@ -484,11 +484,9 @@ int mt7615_mac_write_txwi(struct mt7615_
+ void mt7615_mac_set_timing(struct mt7615_phy *phy);
+ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+                             struct mt76_wcid *wcid,
+-                            struct ieee80211_key_conf *key,
+-                            enum set_key_cmd cmd);
++                            struct ieee80211_key_conf *key);
+ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-                          struct ieee80211_key_conf *key,
+-                          enum set_key_cmd cmd);
++                          struct ieee80211_key_conf *key);
+ void mt7615_mac_reset_work(struct work_struct *work);
+ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+@@ -455,20 +455,20 @@ int mt76x02_set_key(struct ieee80211_hw
+       msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
+       wcid = msta ? &msta->wcid : &mvif->group_wcid;
+-      if (cmd == SET_KEY) {
+-              key->hw_key_idx = wcid->idx;
+-              wcid->hw_key_idx = idx;
+-              if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+-                      key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+-                      wcid->sw_iv = true;
+-              }
+-      } else {
++      if (cmd != SET_KEY) {
+               if (idx == wcid->hw_key_idx) {
+                       wcid->hw_key_idx = -1;
+                       wcid->sw_iv = false;
+               }
+-              key = NULL;
++              return 0;
++      }
++
++      key->hw_key_idx = wcid->idx;
++      wcid->hw_key_idx = idx;
++      if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
++              key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
++              wcid->sw_iv = true;
+       }
+       mt76_wcid_key_setup(&dev->mt76, wcid, key);
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -410,16 +410,15 @@ static int mt7915_set_key(struct ieee802
+               mt7915_mcu_add_bss_info(phy, vif, true);
+       }
+-      if (cmd == SET_KEY)
++      if (cmd == SET_KEY) {
+               *wcid_keyidx = idx;
+-      else if (idx == *wcid_keyidx)
+-              *wcid_keyidx = -1;
+-      else
++      } else {
++              if (idx == *wcid_keyidx)
++                      *wcid_keyidx = -1;
+               goto out;
++      }
+-      mt76_wcid_key_setup(&dev->mt76, wcid,
+-                          cmd == SET_KEY ? key : NULL);
+-
++      mt76_wcid_key_setup(&dev->mt76, wcid, key);
+       err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+                                     key, MCU_EXT_CMD(STA_REC_UPDATE),
+                                     &msta->wcid, cmd);
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -569,16 +569,15 @@ static int mt7921_set_key(struct ieee802
+       mt7921_mutex_acquire(dev);
+-      if (cmd == SET_KEY)
++      if (cmd == SET_KEY) {
+               *wcid_keyidx = idx;
+-      else if (idx == *wcid_keyidx)
+-              *wcid_keyidx = -1;
+-      else
++      } else {
++              if (idx == *wcid_keyidx)
++                      *wcid_keyidx = -1;
+               goto out;
++      }
+-      mt76_wcid_key_setup(&dev->mt76, wcid,
+-                          cmd == SET_KEY ? key : NULL);
+-
++      mt76_wcid_key_setup(&dev->mt76, wcid, key);
+       err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+                                     key, MCU_UNI_CMD(STA_REC_UPDATE),
+                                     &msta->wcid, cmd);
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+@@ -351,16 +351,15 @@ static int mt7996_set_key(struct ieee802
+               mt7996_mcu_add_bss_info(phy, vif, true);
+       }
+-      if (cmd == SET_KEY)
++      if (cmd == SET_KEY) {
+               *wcid_keyidx = idx;
+-      else if (idx == *wcid_keyidx)
+-              *wcid_keyidx = -1;
+-      else
++      } else {
++              if (idx == *wcid_keyidx)
++                      *wcid_keyidx = -1;
+               goto out;
++      }
+-      mt76_wcid_key_setup(&dev->mt76, wcid,
+-                          cmd == SET_KEY ? key : NULL);
+-
++      mt76_wcid_key_setup(&dev->mt76, wcid, key);
+       err = mt7996_mcu_add_key(&dev->mt76, vif, &msta->bip,
+                                key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+                                &msta->wcid, cmd);
diff --git a/queue-6.2/wifi-mt76-mt7921-fix-fw-used-for-offload-check-for-mt7922.patch b/queue-6.2/wifi-mt76-mt7921-fix-fw-used-for-offload-check-for-mt7922.patch
new file mode 100644 (file)
index 0000000..abf096c
--- /dev/null
@@ -0,0 +1,34 @@
+From eb85df0a5643612285f61f38122564498d0c49f7 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 28 Mar 2023 12:01:17 +0200
+Subject: wifi: mt76: mt7921: fix fw used for offload check for mt7922
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+commit eb85df0a5643612285f61f38122564498d0c49f7 upstream.
+
+Fix the firmware version used for the offload capability check for 0x0616
+devices. This patch enables offload capabilities for 0x0616 devices.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217245
+Fixes: 034ae28b56f1 ("wifi: mt76: mt7921: introduce remain_on_channel support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/632d8f0c9781c9902d7160e2c080aa7e9232d50d.1679997487.git.lorenzo@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7921/pci.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -20,7 +20,7 @@ static const struct pci_device_id mt7921
+       { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
+               .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+       { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
+-              .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
++              .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
+       { },
+ };
diff --git a/queue-6.2/zsmalloc-document-freeable-stats.patch b/queue-6.2/zsmalloc-document-freeable-stats.patch
new file mode 100644 (file)
index 0000000..616d35b
--- /dev/null
@@ -0,0 +1,34 @@
+From 618a8a917dbf5830e2064d2fa0568940eb5d2584 Mon Sep 17 00:00:00 2001
+From: Sergey Senozhatsky <senozhatsky@chromium.org>
+Date: Sat, 25 Mar 2023 11:46:31 +0900
+Subject: zsmalloc: document freeable stats
+
+From: Sergey Senozhatsky <senozhatsky@chromium.org>
+
+commit 618a8a917dbf5830e2064d2fa0568940eb5d2584 upstream.
+
+When the freeable class stat was added to the classes file (back in 2016) we
+forgot to update the zsmalloc documentation.  Fix that.
+
+Link: https://lkml.kernel.org/r/20230325024631.2817153-3-senozhatsky@chromium.org
+Fixes: 1120ed548394 ("mm/zsmalloc: add `freeable' column to pool stat")
+Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/mm/zsmalloc.rst |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/Documentation/mm/zsmalloc.rst
++++ b/Documentation/mm/zsmalloc.rst
+@@ -68,6 +68,8 @@ pages_used
+       the number of pages allocated for the class
+ pages_per_zspage
+       the number of 0-order pages to make a zspage
++freeable
++      the approximate number of pages class compaction can free
+ We assign a zspage to ZS_ALMOST_EMPTY fullness group when n <= N / f, where