--- /dev/null
+From 023e46cf727eb7caa0d89c745e03a3d4e2c2314e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 11:36:56 +0200
+Subject: accel/ivpu: Correct mutex unlock order in job submission
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+[ Upstream commit 75680b7cd461b169c7ccd2a0fba7542868b7fce2 ]
+
+The mutex unlock for vdev->submitted_jobs_lock was incorrectly placed
+before unlocking file_priv->lock. Change the order of the unlocks to
+avoid potential race conditions.
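+
+A minimal sketch of the rule being applied, assuming the acquisition
+order implied by the corrected unlocks below:
+
+	mutex_lock(&vdev->submitted_jobs_lock);		/* outer lock */
+	mutex_lock(&file_priv->lock);			/* inner lock */
+	/* ... submit the job ... */
+	mutex_unlock(&file_priv->lock);			/* inner first */
+	mutex_unlock(&vdev->submitted_jobs_lock);	/* outer last */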
+
+Fixes: 5bbccadaf33e ("accel/ivpu: Abort all jobs after command queue unregister")
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://lore.kernel.org/r/20250425093656.2228168-1-jacek.lawrynowicz@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ivpu/ivpu_job.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index 766fc383680f1..79b77d8a35a77 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -646,8 +646,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+ err_erase_xa:
+ xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+ err_unlock:
+- mutex_unlock(&vdev->submitted_jobs_lock);
+ mutex_unlock(&file_priv->lock);
++ mutex_unlock(&vdev->submitted_jobs_lock);
+ ivpu_rpm_put(vdev);
+ return ret;
+ }
+--
+2.39.5
+
--- /dev/null
+From c109f89f2aaa2df53b073f241cd778f2d38fbb78 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jan 2025 18:32:24 +0100
+Subject: accel/ivpu: Separate DB ID and CMDQ ID allocations from CMDQ
+ allocation
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+[ Upstream commit 950942b4813f8c44dbec683fdb140cf4a238516b ]
+
+Move the doorbell ID and command queue ID XArray allocations out of the
+command queue memory allocation function. This will allow ID allocations to be
+done without the need for actual memory allocation.
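+
+A minimal sketch of the resulting pattern, based on the XArray API and
+the identifiers used in the diff below:
+
+	u32 id;
+	/* reserve an ID without binding any memory to it yet */
+	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &id, NULL,
+			      file_priv->cmdq_limit,
+			      &file_priv->cmdq_id_next, GFP_KERNEL);
+	/* ... allocate the queue memory separately ... */
+	/* then attach the object to the reserved ID */
+	ret = xa_err(xa_store(&file_priv->cmdq_xa, id, cmdq, GFP_KERNEL));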
+
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-2-maciej.falkowski@linux.intel.com
+Stable-dep-of: 75680b7cd461 ("accel/ivpu: Correct mutex unlock order in job submission")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ivpu/ivpu_job.c | 88 +++++++++++++++++++++++++----------
+ 1 file changed, 64 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index 673801889c7b2..766fc383680f1 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -83,23 +83,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+ if (!cmdq)
+ return NULL;
+
+- ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+- GFP_KERNEL);
+- if (ret < 0) {
+- ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
+- goto err_free_cmdq;
+- }
+-
+- ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
+- &file_priv->cmdq_id_next, GFP_KERNEL);
+- if (ret < 0) {
+- ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
+- goto err_erase_db_xa;
+- }
+-
+ cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!cmdq->mem)
+- goto err_erase_cmdq_xa;
++ goto err_free_cmdq;
+
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+@@ -107,10 +93,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+
+ return cmdq;
+
+-err_erase_cmdq_xa:
+- xa_erase(&file_priv->cmdq_xa, cmdq->id);
+-err_erase_db_xa:
+- xa_erase(&vdev->db_xa, cmdq->db_id);
+ err_free_cmdq:
+ kfree(cmdq);
+ return NULL;
+@@ -234,30 +216,88 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
+ return 0;
+ }
+
++static int ivpu_db_id_alloc(struct ivpu_device *vdev, u32 *db_id)
++{
++ int ret;
++ u32 id;
++
++ ret = xa_alloc_cyclic(&vdev->db_xa, &id, NULL, vdev->db_limit, &vdev->db_next, GFP_KERNEL);
++ if (ret < 0)
++ return ret;
++
++ *db_id = id;
++ return 0;
++}
++
++static int ivpu_cmdq_id_alloc(struct ivpu_file_priv *file_priv, u32 *cmdq_id)
++{
++ int ret;
++ u32 id;
++
++ ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &id, NULL, file_priv->cmdq_limit,
++ &file_priv->cmdq_id_next, GFP_KERNEL);
++ if (ret < 0)
++ return ret;
++
++ *cmdq_id = id;
++ return 0;
++}
++
+ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
+ {
++ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
+- unsigned long cmdq_id;
++ unsigned long id;
+ int ret;
+
+ lockdep_assert_held(&file_priv->lock);
+
+- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
++ xa_for_each(&file_priv->cmdq_xa, id, cmdq)
+ if (cmdq->priority == priority)
+ break;
+
+ if (!cmdq) {
+ cmdq = ivpu_cmdq_alloc(file_priv);
+- if (!cmdq)
++ if (!cmdq) {
++ ivpu_err(vdev, "Failed to allocate command queue\n");
+ return NULL;
++ }
++
++ ret = ivpu_db_id_alloc(vdev, &cmdq->db_id);
++ if (ret) {
++ ivpu_err(file_priv->vdev, "Failed to allocate doorbell ID: %d\n", ret);
++ goto err_free_cmdq;
++ }
++
++ ret = ivpu_cmdq_id_alloc(file_priv, &cmdq->id);
++ if (ret) {
++ ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
++ goto err_erase_db_id;
++ }
++
+ cmdq->priority = priority;
++ ret = xa_err(xa_store(&file_priv->cmdq_xa, cmdq->id, cmdq, GFP_KERNEL));
++ if (ret) {
++ ivpu_err(vdev, "Failed to store command queue in cmdq_xa: %d\n", ret);
++ goto err_erase_cmdq_id;
++ }
+ }
+
+ ret = ivpu_cmdq_init(file_priv, cmdq, priority);
+- if (ret)
+- return NULL;
++ if (ret) {
++ ivpu_err(vdev, "Failed to initialize command queue: %d\n", ret);
++ goto err_free_cmdq;
++ }
+
+ return cmdq;
++
++err_erase_cmdq_id:
++ xa_erase(&file_priv->cmdq_xa, cmdq->id);
++err_erase_db_id:
++ xa_erase(&vdev->db_xa, cmdq->db_id);
++err_free_cmdq:
++ ivpu_cmdq_free(file_priv, cmdq);
++ return NULL;
+ }
+
+ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+--
+2.39.5
+
--- /dev/null
+From ee6701fef544ad33c0b7a971e48cd39247852241 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 23:56:14 -0400
+Subject: do_umount(): add missing barrier before refcount checks in sync case
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+[ Upstream commit 65781e19dcfcb4aed1167d87a3ffcc2a0c071d47 ]
+
+do_umount() analogue of the race fixed in 119e1ef80ecf "fix
+__legitimize_mnt()/mntput() race". Here we want to make sure that
+if __legitimize_mnt() doesn't notice our lock_mount_hash(), we will
+notice their refcount increment. Harder to hit than the
+mntput_no_expire() one, fortunately, and the consequences are milder (a
+sync umount acting like umount -l on a rare race with RCU pathwalk
+hitting at just the wrong time, instead of the use-after-free galore the
+mntput_no_expire() counterpart used to be hit with). Still a bug...
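+
+A rough sketch of the pairing, simplified from the code touched below:
+
+	/* __legitimize_mnt() (RCU pathwalk) */
+	mnt_add_count(mnt, 1);		/* bump the refcount */
+	smp_mb();			/* pairs with do_umount() */
+	/* read_seqretry(&mount_lock, seq) now notices lock_mount_hash() */
+
+	/* do_umount(), sync case, after lock_mount_hash() */
+	smp_mb();			/* pairs with __legitimize_mnt() */
+	/* refcount checks in propagate_mount_busy() see the increment */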
+
+Fixes: 48a066e72d97 ("RCU'd vfsmounts")
+Reviewed-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/namespace.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 280a6ebc46d93..5b84e29613fe4 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -778,7 +778,7 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
+ return 0;
+ mnt = real_mount(bastard);
+ mnt_add_count(mnt, 1);
+- smp_mb(); // see mntput_no_expire()
++ smp_mb(); // see mntput_no_expire() and do_umount()
+ if (likely(!read_seqretry(&mount_lock, seq)))
+ return 0;
+ if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
+@@ -1956,6 +1956,7 @@ static int do_umount(struct mount *mnt, int flags)
+ umount_tree(mnt, UMOUNT_PROPAGATE);
+ retval = 0;
+ } else {
++ smp_mb(); // paired with __legitimize_mnt()
+ shrink_submounts(mnt);
+ retval = -EBUSY;
+ if (!propagate_mount_busy(mnt, 2)) {
+--
+2.39.5
+
--- /dev/null
+From 2091cbb922280cf4dec04bb2975baa92243b6a8d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 May 2025 12:02:56 -0500
+Subject: drm/panel: simple: Update timings for AUO G101EVN010
+
+From: Kevin Baker <kevinb@ventureresearch.com>
+
+[ Upstream commit 7c6fa1797a725732981f2d77711c867166737719 ]
+
+Switch to panel timings based on the datasheet for the AUO G101EVN01.0
+LVDS panel. The default (typical) timings were tested on the panel.
+
+Previous mode-based timings resulted in horizontal display shift.
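+
+For reference, each struct display_timing field is a { min, typical,
+max } triplet, e.g. with the values from the diff below:
+
+	.pixelclock = { 64000000, 68930000, 85000000 },	/* Hz */
+	.hfront_porch = { 8, 64, 256 },			/* pixels */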
+
+Signed-off-by: Kevin Baker <kevinb@ventureresearch.com>
+Fixes: 4fb86404a977 ("drm/panel: simple: Add AUO G101EVN010 panel support")
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://lore.kernel.org/r/20250505170256.1385113-1-kevinb@ventureresearch.com
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-simple.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 9b2f128fd3094..cf9ab2d1f1d2a 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -1027,27 +1027,28 @@ static const struct panel_desc auo_g070vvn01 = {
+ },
+ };
+
+-static const struct drm_display_mode auo_g101evn010_mode = {
+- .clock = 68930,
+- .hdisplay = 1280,
+- .hsync_start = 1280 + 82,
+- .hsync_end = 1280 + 82 + 2,
+- .htotal = 1280 + 82 + 2 + 84,
+- .vdisplay = 800,
+- .vsync_start = 800 + 8,
+- .vsync_end = 800 + 8 + 2,
+- .vtotal = 800 + 8 + 2 + 6,
++static const struct display_timing auo_g101evn010_timing = {
++ .pixelclock = { 64000000, 68930000, 85000000 },
++ .hactive = { 1280, 1280, 1280 },
++ .hfront_porch = { 8, 64, 256 },
++ .hback_porch = { 8, 64, 256 },
++ .hsync_len = { 40, 168, 767 },
++ .vactive = { 800, 800, 800 },
++ .vfront_porch = { 4, 8, 100 },
++ .vback_porch = { 4, 8, 100 },
++ .vsync_len = { 8, 16, 223 },
+ };
+
+ static const struct panel_desc auo_g101evn010 = {
+- .modes = &auo_g101evn010_mode,
+- .num_modes = 1,
++ .timings = &auo_g101evn010_timing,
++ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
+--
+2.39.5
+
--- /dev/null
+From e65f0edb4aa9eed8cdb9725e8db0601ca6730a35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 02:23:02 +0000
+Subject: drm/xe: Release force wake first then runtime power
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit 9d271a4f5ba52520e448ab223b1a91c6e35f17c7 ]
+
+xe_force_wake_get() is dependent on xe_pm_runtime_get(), so on the
+release path xe_force_wake_put() should be called first, then
+xe_pm_runtime_put().
+Combine the error path and the normal path with a goto.
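+
+A minimal sketch of the resulting acquire/release nesting, simplified
+from the code below:
+
+	xe_pm_runtime_get(xe);					/* outer */
+	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); /* inner */
+	/* ... use the hardware ... */
+	xe_force_wake_put(gt_to_fw(gt), fw_ref);	/* inner first */
+	xe_pm_runtime_put(xe);				/* outer last */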
+
+Fixes: 85d547608ef5 ("drm/xe/xe_gt_debugfs: Update handling of xe_force_wake_get return")
+Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Link: https://lore.kernel.org/r/20250507022302.2187527-1-shuicheng.lin@intel.com
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+(cherry picked from commit 432cd94efdca06296cc5e76d673546f58aa90ee1)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gt_debugfs.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
+index 2d63a69cbfa38..f7005a3643e62 100644
+--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
++++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
+@@ -92,22 +92,23 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ unsigned int fw_ref;
++ int ret = 0;
+
+ xe_pm_runtime_get(xe);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+- xe_pm_runtime_put(xe);
+- xe_force_wake_put(gt_to_fw(gt), fw_ref);
+- return -ETIMEDOUT;
++ ret = -ETIMEDOUT;
++ goto fw_put;
+ }
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_hw_engine_print(hwe, p);
+
++fw_put:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ xe_pm_runtime_put(xe);
+
+- return 0;
++ return ret;
+ }
+
+ static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
+--
+2.39.5
+
--- /dev/null
+From f951f0fd40797476aa92669c0b3442ea1d3d8fca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 13:53:57 +0530
+Subject: drm/xe/tests/mocs: Hold XE_FORCEWAKE_ALL for LNCF regs
+
+From: Tejas Upadhyay <tejas.upadhyay@intel.com>
+
+[ Upstream commit 51c0ee84e4dc339287b2d7335f2b54d747794c83 ]
+
+LNCF registers report wrong values when only XE_FORCEWAKE_GT
+is held. Holding XE_FORCEWAKE_ALL ensures correct
+operation on LNCF registers.
+
+V2(Himal):
+ - Use xe_force_wake_ref_has_domain
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/1999
+Fixes: a6a4ea6d7d37 ("drm/xe: Add mocs kunit")
+Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250428082357.1730068-1-tejas.upadhyay@intel.com
+Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
+(cherry picked from commit 70a2585e582058e94fe4381a337be42dec800337)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/tests/xe_mocs.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
+index ef1e5256c56a8..0e502feaca818 100644
+--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
++++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
+@@ -46,8 +46,11 @@ static void read_l3cc_table(struct xe_gt *gt,
+ unsigned int fw_ref, i;
+ u32 reg_val;
+
+- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+- KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
++ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
++ xe_force_wake_put(gt_to_fw(gt), fw_ref);
++ KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
++ }
+
+ for (i = 0; i < info->num_mocs_regs; i++) {
+ if (!(i & 1)) {
+--
+2.39.5
+
--- /dev/null
+From 3e869c287542cf57863a89d625fcb13203efd477 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Apr 2025 11:34:27 +0100
+Subject: iio: accel: adxl355: Make timestamp 64-bit aligned using aligned_s64
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+[ Upstream commit 1bb942287e05dc4c304a003ea85e6dd9a5e7db39 ]
+
+The IIO ABI requires 64-bit aligned timestamps. In this case insufficient
+padding would have been added on architectures where an s64 is only 32-bit
+aligned. Use aligned_s64 to enforce the correct alignment.
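+
+A minimal sketch of the failure mode, with a hypothetical 12-byte scan
+buffer for illustration:
+
+	struct {
+		u8 buf[12];
+		s64 ts;		/* offset 12 if s64 is 4-byte aligned */
+	} sample;
+	/* iio_push_to_buffers_with_timestamp() assumes the timestamp   */
+	/* sits 8-byte aligned at the end, i.e. at offset 16 here;      */
+	/* aligned_s64 forces that layout on all architectures.         */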
+
+Fixes: 327a0eaf19d5 ("iio: accel: adxl355: Add triggered buffer support")
+Reported-by: David Lechner <dlechner@baylibre.com>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Reviewed-by: David Lechner <dlechner@baylibre.com>
+Link: https://patch.msgid.link/20250413103443.2420727-5-jic23@kernel.org
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/accel/adxl355_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
+index e8cd21fa77a69..cbac622ef8211 100644
+--- a/drivers/iio/accel/adxl355_core.c
++++ b/drivers/iio/accel/adxl355_core.c
+@@ -231,7 +231,7 @@ struct adxl355_data {
+ u8 transf_buf[3];
+ struct {
+ u8 buf[14];
+- s64 ts;
++ aligned_s64 ts;
+ } buffer;
+ } __aligned(IIO_DMA_MINALIGN);
+ };
+--
+2.39.5
+
--- /dev/null
+From ea7cb46787929be2161d71165f99bb5a1c31132c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Mar 2025 19:35:15 +0000
+Subject: iio: accel: adxl367: fix setting odr for activity time update
+
+From: Lothar Rubusch <l.rubusch@gmail.com>
+
+[ Upstream commit 38f67d0264929762e54ae5948703a21f841fe706 ]
+
+Fix setting the odr value so that the activity time update is based on
+the frequency derived from the new odr, and not from the obsolete one.
+
+The [small] bug: When _adxl367_set_odr() is called with a new odr value,
+it first writes the new odr value to the hardware register
+ADXL367_REG_FILTER_CTL.
+Second, it calls _adxl367_set_act_time_ms(), which calls
+adxl367_time_ms_to_samples(). Here st->odr still holds the old odr value.
+This st->odr member is used to derive a frequency value, which is
+applied to update ADXL367_REG_TIME_ACT. The idea is to update the
+activity time based on the capabilities and power consumption of the
+current ODR rate.
+Finally, when the function calls return, again in _adxl367_set_odr() the
+new ODR is assigned to st->odr.
+
+The fix: When a new ODR value is written to ADXL367_REG_FILTER_CTL,
+ADXL367_REG_TIME_ACT should also be updated with a frequency based on
+the new ODR value and not the old one. Moving the assignment to st->odr
+before the activity time update fixes this.
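+
+Sketch of the resulting call order, with the names from the diff below:
+
+	/* the new ODR is written to ADXL367_REG_FILTER_CTL first */
+	st->odr = odr;				/* update before timer math */
+	_adxl367_set_act_time_ms(st, st->act_time_ms);	/* uses new odr */
+	_adxl367_set_inact_time_ms(st, st->inact_time_ms);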
+
+Fixes: cbab791c5e2a5 ("iio: accel: add ADXL367 driver")
+Signed-off-by: Lothar Rubusch <l.rubusch@gmail.com>
+Reviewed-by: Marcelo Schmitt <marcelo.schmitt1@gmail.com>
+Link: https://patch.msgid.link/20250309193515.2974-1-l.rubusch@gmail.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/accel/adxl367.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
+index a48ac0d7bd96b..2ba7d7de47e44 100644
+--- a/drivers/iio/accel/adxl367.c
++++ b/drivers/iio/accel/adxl367.c
+@@ -604,18 +604,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
+ if (ret)
+ return ret;
+
++ st->odr = odr;
++
+ /* Activity timers depend on ODR */
+ ret = _adxl367_set_act_time_ms(st, st->act_time_ms);
+ if (ret)
+ return ret;
+
+- ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms);
+- if (ret)
+- return ret;
+-
+- st->odr = odr;
+-
+- return 0;
++ return _adxl367_set_inact_time_ms(st, st->inact_time_ms);
+ }
+
+ static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)
+--
+2.39.5
+
--- /dev/null
+From f4db44c90eb18485bc2b1151018d01d75e6617fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Apr 2025 11:34:26 +0100
+Subject: iio: adc: dln2: Use aligned_s64 for timestamp
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+[ Upstream commit 5097eaae98e53f9ab9d35801c70da819b92ca907 ]
+
+Here the lack of marking allows the overall structure to not be
+sufficiently aligned resulting in misplacement of the timestamp
+in iio_push_to_buffers_with_timestamp(). Use aligned_s64 to
+force the alignment on all architectures.
+
+Fixes: 7c0299e879dd ("iio: adc: Add support for DLN2 ADC")
+Reported-by: David Lechner <dlechner@baylibre.com>
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Reviewed-by: David Lechner <dlechner@baylibre.com>
+Link: https://patch.msgid.link/20250413103443.2420727-4-jic23@kernel.org
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/adc/dln2-adc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
+index 221a5fdc1eaac..e416501770855 100644
+--- a/drivers/iio/adc/dln2-adc.c
++++ b/drivers/iio/adc/dln2-adc.c
+@@ -467,7 +467,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct {
+ __le16 values[DLN2_ADC_MAX_CHANNELS];
+- int64_t timestamp_space;
++ aligned_s64 timestamp_space;
+ } data;
+ struct dln2_adc_get_all_vals dev_data;
+ struct dln2_adc *dln2 = iio_priv(indio_dev);
+--
+2.39.5
+
--- /dev/null
+From 2fe0fb31583c2a74a71f5e83dfe2da5ba6283752 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Mar 2025 15:01:02 -0300
+Subject: iio: imu: bmi270: fix initial sampling frequency configuration
+
+From: Gustavo Silva <gustavograzs@gmail.com>
+
+[ Upstream commit 6d03811d7a99e08d5928f58120acb45b8ba22b08 ]
+
+In the bmi270_configure_imu() function, the accelerometer and gyroscope
+configuration registers are incorrectly written with the mask
+BMI270_PWR_CONF_ADV_PWR_SAVE_MSK, which is unrelated to these registers.
+
+As a result, the accelerometer's sampling frequency is set to 200 Hz
+instead of the intended 100 Hz.
+
+Remove the mask to ensure the correct bits are set in the configuration
+registers.
+
+Fixes: 3ea51548d6b2 ("iio: imu: Add i2c driver for bmi270 imu")
+Signed-off-by: Gustavo Silva <gustavograzs@gmail.com>
+Reviewed-by: Alex Lanzano <lanzano.alex@gmail.com>
+Link: https://patch.msgid.link/20250304-bmi270-odr-fix-v1-1-384dbcd699fb@gmail.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/imu/bmi270/bmi270_core.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/iio/imu/bmi270/bmi270_core.c b/drivers/iio/imu/bmi270/bmi270_core.c
+index 7fec52e0b4862..950fcacddd40d 100644
+--- a/drivers/iio/imu/bmi270/bmi270_core.c
++++ b/drivers/iio/imu/bmi270/bmi270_core.c
+@@ -654,8 +654,7 @@ static int bmi270_configure_imu(struct bmi270_data *bmi270_device)
+ FIELD_PREP(BMI270_ACC_CONF_ODR_MSK,
+ BMI270_ACC_CONF_ODR_100HZ) |
+ FIELD_PREP(BMI270_ACC_CONF_BWP_MSK,
+- BMI270_ACC_CONF_BWP_NORMAL_MODE) |
+- BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
++ BMI270_ACC_CONF_BWP_NORMAL_MODE));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to configure accelerometer");
+
+@@ -663,8 +662,7 @@ static int bmi270_configure_imu(struct bmi270_data *bmi270_device)
+ FIELD_PREP(BMI270_GYR_CONF_ODR_MSK,
+ BMI270_GYR_CONF_ODR_200HZ) |
+ FIELD_PREP(BMI270_GYR_CONF_BWP_MSK,
+- BMI270_GYR_CONF_BWP_NORMAL_MODE) |
+- BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
++ BMI270_GYR_CONF_BWP_NORMAL_MODE));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to configure gyroscope");
+
+--
+2.39.5
+
--- /dev/null
+From 196eac23573d13433d4ffa8fb9398cdfeb0c8f57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Apr 2025 11:34:36 +0100
+Subject: iio: temp: maxim-thermocouple: Fix potential lack of DMA safe buffer.
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+[ Upstream commit f79aeb6c631b57395f37acbfbe59727e355a714c ]
+
+The trick of using __aligned(IIO_DMA_MINALIGN) ensures that there is
+no overlap between buffers used for DMA and those used for driver
+state storage that are before the marking. It doesn't ensure
+anything above state variables found after the marking. Hence
+move this particular bit of state earlier in the structure.
+
+Fixes: 10897f34309b ("iio: temp: maxim_thermocouple: Fix alignment for DMA safety")
+Reviewed-by: David Lechner <dlechner@baylibre.com>
+Link: https://patch.msgid.link/20250413103443.2420727-14-jic23@kernel.org
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/temperature/maxim_thermocouple.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
+index c28a7a6dea5f1..555a61e2f3fdd 100644
+--- a/drivers/iio/temperature/maxim_thermocouple.c
++++ b/drivers/iio/temperature/maxim_thermocouple.c
+@@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = {
+ struct maxim_thermocouple_data {
+ struct spi_device *spi;
+ const struct maxim_thermocouple_chip *chip;
++ char tc_type;
+
+ u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
+- char tc_type;
+ };
+
+ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
+--
+2.39.5
+
--- /dev/null
+From 0377ab6cc266ad60b9986e7eb67d49ba6a40129c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 May 2025 14:12:03 -0400
+Subject: io_uring/sqpoll: Increase task_work submission batch size
+
+From: Gabriel Krisman Bertazi <krisman@suse.de>
+
+[ Upstream commit 92835cebab120f8a5f023a26a792a2ac3f816c4f ]
+
+Our QA team reported a 10%-23% throughput reduction on an io_uring
+sqpoll testcase doing IO to a null_blk, that I traced back to a
+reduction of the device submission queue depth utilization. It turns out
+that, after commit af5d68f8892f ("io_uring/sqpoll: manage task_work
+privately"), we capped the number of task_work entries that can be
+completed from a single spin of sqpoll to only 8 entries, before the
+sqpoll goes around to (potentially) sleep. While this cap doesn't drive
+the submission side directly, it impacts the completion behavior, which
+affects the number of IO queued by fio per sqpoll cycle on the
+submission side, and io_uring ends up seeing fewer IOs per sqpoll cycle.
+As a result, block layer plugging is less effective, and we see more
+time spent inside the block layer in profiling charts, and increased
+submission latency measured by fio.
+
+There are other places that have increased overhead once sqpoll sleeps
+more often, such as the sqpoll utilization calculation. But, in this
+microbenchmark, those were not representative enough in perf charts, and
+their removal didn't yield measurable changes in throughput. The major
+overhead comes from the fact we plug less, and less often, when submitting
+to the block layer.
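+
+Conceptually, the loop being tuned looks like this simplified sketch
+(next_task_work() and run() are illustrative stand-ins, not the actual
+sqpoll code):
+
+	count = 0;
+	while ((tw = next_task_work()) &&
+	       count++ < IORING_TW_CAP_ENTRIES_VALUE)
+		run(tw);	/* completions feed further submissions */
+	/* sqpoll may sleep here; a larger cap keeps the plug batch full */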
+
+My benchmark is:
+
+fio --ioengine=io_uring --direct=1 --iodepth=128 --runtime=300 --bs=4k \
+ --invalidate=1 --time_based --ramp_time=10 --group_reporting=1 \
+ --filename=/dev/nullb0 --name=RandomReads-direct-nullb-sqpoll-4k-1 \
+ --rw=randread --numjobs=1 --sqthread_poll
+
+In one machine, tested on top of Linux 6.15-rc1, we have the following
+baseline:
+ READ: bw=4994MiB/s (5236MB/s), 4994MiB/s-4994MiB/s (5236MB/s-5236MB/s), io=439GiB (471GB), run=90001-90001msec
+
+With this patch:
+ READ: bw=5762MiB/s (6042MB/s), 5762MiB/s-5762MiB/s (6042MB/s-6042MB/s), io=506GiB (544GB), run=90001-90001msec
+
+which is a 15% improvement in measured bandwidth. The average
+submission latency is noticeably lowered too. As measured by
+fio:
+
+Baseline:
+ lat (usec): min=20, max=241, avg=99.81, stdev=3.38
+Patched:
+ lat (usec): min=26, max=226, avg=86.48, stdev=4.82
+
+If we look at blktrace, we can also see the plugging behavior is
+improved. In the baseline, we end up limited to plugging 8 requests in
+the block layer regardless of the device queue depth size, while after
+patching we can drive more io, and we manage to utilize the full device
+queue.
+
+In the baseline, after a stabilization phase, an ordinary submission
+looks like:
+ 254,0 1 49942 0.016028795 5977 U N [iou-sqp-5976] 7
+
+After patching, I see consistently more requests per unplug.
+ 254,0 1 4996 0.001432872 3145 U N [iou-sqp-3144] 32
+
+Ideally, the cap size would at least be deep enough to fill the
+device queue, but we can't predict that behavior, or assume all IO goes
+to a single device, and thus can't guess the ideal batch size. We also
+don't want to let the tw run unbounded, though I'm not sure it would
+really be a problem. Instead, let's just give it a more sensible value
+that will allow for more efficient batching. I've tested with different
+cap values, and initially proposed to increase the cap to 1024. Jens
+argued it is too big of a bump and I observed that, with 32, I'm no
+longer able to observe this bottleneck in any of my machines.
+
+Fixes: af5d68f8892f ("io_uring/sqpoll: manage task_work privately")
+Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
+Link: https://lore.kernel.org/r/20250508181203.3785544-1-krisman@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/sqpoll.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index d037cc68e9d3e..03c699493b5ab 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -20,7 +20,7 @@
+ #include "sqpoll.h"
+
+ #define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
+-#define IORING_TW_CAP_ENTRIES_VALUE 8
++#define IORING_TW_CAP_ENTRIES_VALUE 32
+
+ enum {
+ IO_SQ_THREAD_SHOULD_STOP = 0,
+--
+2.39.5
+
--- /dev/null
+From b771b7722a453d0aeb4a0cb82ee7fbc93476e183 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 22:36:26 +0800
+Subject: loop: Add sanity check for read/write_iter
+
+From: Lizhi Xu <lizhi.xu@windriver.com>
+
+[ Upstream commit f5c84eff634ba003326aa034c414e2a9dcb7c6a7 ]
+
+Some file systems do not support read_iter/write_iter, such as selinuxfs
+in this issue.
+So before calling them, first confirm that the interface is supported and
+then call it.
+
+It is releavant in that vfs_iter_read/write have the check, and removal
+of their used caused szybot to be able to hit this issue.
+
+Fixes: f2fed441c69b ("loop: stop using vfs_iter__{read,write} for buffered I/O")
+Reported-by: syzbot+6af973a3b8dfd2faefdc@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=6af973a3b8dfd2faefdc
+Signed-off-by: Lizhi Xu <lizhi.xu@windriver.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20250428143626.3318717-1-lizhi.xu@windriver.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/loop.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 61ce7ccde3445..b378d2aa49f06 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -504,6 +504,17 @@ static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
+ lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
+ }
+
++static int loop_check_backing_file(struct file *file)
++{
++ if (!file->f_op->read_iter)
++ return -EINVAL;
++
++ if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
++ return -EINVAL;
++
++ return 0;
++}
++
+ /*
+ * loop_change_fd switched the backing store of a loopback device to
+ * a new file. This is useful for operating system installers to free up
+@@ -525,6 +536,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ if (!file)
+ return -EBADF;
+
++ error = loop_check_backing_file(file);
++ if (error)
++ return error;
++
+ /* suppress uevents while reconfiguring the device */
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+
+@@ -956,6 +971,14 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+
+ if (!file)
+ return -EBADF;
++
++ if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter)
++ return -EINVAL;
++
++ error = loop_check_backing_file(file);
++ if (error)
++ return error;
++
+ is_loop = is_loop_device(file);
+
+ /* This is safe, since we have a reference from open(). */
+--
+2.39.5
+
--- /dev/null
+From 722638b26a7a9aa771899bcc41f71438be7c6bab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 Jan 2025 13:00:38 +0100
+Subject: loop: factor out a loop_assign_backing_file helper
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit d278164832618bf2775c6a89e6434e2633de1eed ]
+
+Split the code for setting up a backing file into a helper in
+preparation for adding more code to this path.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://lore.kernel.org/r/20250131120120.1315125-2-hch@lst.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: f5c84eff634b ("loop: Add sanity check for read/write_iter")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/loop.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 7668b79d8b0a9..61ce7ccde3445 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -496,6 +496,14 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
+ return 0;
+ }
+
++static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
++{
++ lo->lo_backing_file = file;
++ lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
++ mapping_set_gfp_mask(file->f_mapping,
++ lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
++}
++
+ /*
+ * loop_change_fd switched the backing store of a loopback device to
+ * a new file. This is useful for operating system installers to free up
+@@ -549,10 +557,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ disk_force_media_change(lo->lo_disk);
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
+ mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
+- lo->lo_backing_file = file;
+- lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+- mapping_set_gfp_mask(file->f_mapping,
+- lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
++ loop_assign_backing_file(lo, file);
+ loop_update_dio(lo);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
+@@ -943,7 +948,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ const struct loop_config *config)
+ {
+ struct file *file = fget(config->fd);
+- struct address_space *mapping;
+ struct queue_limits lim;
+ int error;
+ loff_t size;
+@@ -979,8 +983,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ if (error)
+ goto out_unlock;
+
+- mapping = file->f_mapping;
+-
+ if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
+ error = -EINVAL;
+ goto out_unlock;
+@@ -1012,9 +1014,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
+
+ lo->lo_device = bdev;
+- lo->lo_backing_file = file;
+- lo->old_gfp_mask = mapping_gfp_mask(mapping);
+- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
++ loop_assign_backing_file(lo, file);
+
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, config->block_size);
+--
+2.39.5
+
--- /dev/null
+From 1539386d9522163d8d31fe207a498af0e84ddaa8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 3 Apr 2025 18:11:42 +0200
+Subject: MIPS: Fix idle VS timer enqueue
+
+From: Marco Crivellari <marco.crivellari@suse.com>
+
+[ Upstream commit 56651128e2fbad80f632f388d6bf1f39c928267a ]
+
+MIPS re-enables interrupts on its idle routine and performs
+a TIF_NEED_RESCHED check afterwards before putting the CPU to sleep.
+
+The IRQs firing between the check and the 'wait' instruction may set the
+TIF_NEED_RESCHED flag. In order to deal with this possible race, IRQs
+interrupting __r4k_wait() rollback their return address to the
+beginning of __r4k_wait() so that TIF_NEED_RESCHED is checked
+again before going back to sleep.
+
+However idle IRQs can also queue timers that may require a tick
+reprogramming through a new generic idle loop iteration but those timers
+would go unnoticed here because __r4k_wait() only checks
+TIF_NEED_RESCHED. It doesn't check for pending timers.
+
+Fix this with fast-forwarding idle IRQs return address to the end of the
+idle routine instead of the beginning, so that the generic idle loop
+handles both TIF_NEED_RESCHED and pending timers.
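+
+The race being closed, sketched in pseudo-C mirroring the assembly:
+
+	local_irq_enable();
+	/* An IRQ landing here may set TIF_NEED_RESCHED or queue a timer.
+	 * The rollback handler now advances EPC past the wait below, so
+	 * the generic idle loop re-evaluates both conditions. */
+	wait();			/* only reached if no IRQ hit the window */
+	local_irq_disable();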
+
+The CONFIG_CPU_MICROMIPS conditional has been removed along with the nop
+instructions: there, NOPs are 2 bytes in size, so change the code to use
+3 _ssnop instructions, which are always 4 bytes, and remove the ifdef.
+An ehb was added to make sure the hazard is always cleared.
+
+Fixes: c65a5480ff29 ("[MIPS] Fix potential latency problem due to non-atomic cpu_wait.")
+Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Acked-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/idle.h | 3 +-
+ arch/mips/kernel/genex.S | 62 +++++++++++++++++++++---------------
+ arch/mips/kernel/idle.c | 7 ----
+ 3 files changed, 37 insertions(+), 35 deletions(-)
+
+diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
+index 0992cad9c632e..2bc3678455ed0 100644
+--- a/arch/mips/include/asm/idle.h
++++ b/arch/mips/include/asm/idle.h
+@@ -6,8 +6,7 @@
+ #include <linux/linkage.h>
+
+ extern void (*cpu_wait)(void);
+-extern void r4k_wait(void);
+-extern asmlinkage void __r4k_wait(void);
++extern asmlinkage void r4k_wait(void);
+ extern void r4k_wait_irqoff(void);
+
+ static inline int using_rollback_handler(void)
+diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
+index a572ce36a24f2..46d975d00298d 100644
+--- a/arch/mips/kernel/genex.S
++++ b/arch/mips/kernel/genex.S
+@@ -104,42 +104,52 @@ handle_vcei:
+
+ __FINIT
+
+- .align 5 /* 32 byte rollback region */
+-LEAF(__r4k_wait)
+- .set push
+- .set noreorder
+- /* start of rollback region */
+- LONG_L t0, TI_FLAGS($28)
+- nop
+- andi t0, _TIF_NEED_RESCHED
+- bnez t0, 1f
+- nop
+- nop
+- nop
+-#ifdef CONFIG_CPU_MICROMIPS
+- nop
+- nop
+- nop
+- nop
+-#endif
++ /* Align to 32 bytes for the maximum idle interrupt region size. */
++ .align 5
++LEAF(r4k_wait)
++ /* Keep the ISA bit clear for calculations on local labels here. */
++0: .fill 0
++ /* Start of idle interrupt region. */
++ local_irq_enable
++ /*
++ * If an interrupt lands here, before going idle on the next
++ * instruction, we must *NOT* go idle since the interrupt could
++ * have set TIF_NEED_RESCHED or caused a timer to need resched.
++ * Fall through -- see rollback_handler below -- and have the
++ * idle loop take care of things.
++ */
++1: .fill 0
++ /* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */
++ .if 1b - 0b > 32
++ .error "overlong idle interrupt region"
++ .elseif 1b - 0b > 8
++ .align 4
++ .endif
++2: .fill 0
++ .equ r4k_wait_idle_size, 2b - 0b
++ /* End of idle interrupt region; size has to be a power of 2. */
+ .set MIPS_ISA_ARCH_LEVEL_RAW
++r4k_wait_insn:
+ wait
+- /* end of rollback region (the region size must be power of two) */
+-1:
++r4k_wait_exit:
++ .set mips0
++ local_irq_disable
+ jr ra
+- nop
+- .set pop
+- END(__r4k_wait)
++ END(r4k_wait)
++ .previous
+
+ .macro BUILD_ROLLBACK_PROLOGUE handler
+ FEXPORT(rollback_\handler)
+ .set push
+ .set noat
+ MFC0 k0, CP0_EPC
+- PTR_LA k1, __r4k_wait
+- ori k0, 0x1f /* 32 byte rollback region */
+- xori k0, 0x1f
++ /* Subtract/add 2 to let the ISA bit propagate through the mask. */
++ PTR_LA k1, r4k_wait_insn - 2
++ ori k0, r4k_wait_idle_size - 2
++ .set noreorder
+ bne k0, k1, \handler
++ PTR_ADDIU k0, r4k_wait_exit - r4k_wait_insn + 2
++ .set reorder
+ MTC0 k0, CP0_EPC
+ .set pop
+ .endm
+diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
+index 5abc8b7340f88..80e8a04a642e0 100644
+--- a/arch/mips/kernel/idle.c
++++ b/arch/mips/kernel/idle.c
+@@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void)
+ write_c0_conf(cfg | R30XX_CONF_HALT);
+ }
+
+-void __cpuidle r4k_wait(void)
+-{
+- raw_local_irq_enable();
+- __r4k_wait();
+- raw_local_irq_disable();
+-}
+-
+ /*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically. Unfortunately the "It is
+--
+2.39.5
+
--- /dev/null
+From 8b099fbb06bef869fde41f36cc7d06a39535869f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 27 Apr 2025 13:34:24 +0200
+Subject: MIPS: Fix MAX_REG_OFFSET
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit c44572e0cc13c9afff83fd333135a0aa9b27ba26 ]
+
+Fix MAX_REG_OFFSET to point to the last register in 'pt_regs' and not to
+the marker itself, which could allow regs_get_register() to return an
+invalid offset.
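+
+Sketch of the bound being corrected (pt_regs layout abridged for
+illustration):
+
+	struct pt_regs {
+		unsigned long regs[32];
+		/* ... */
+		unsigned long __last[0];	/* end marker, not a register */
+	};
+	/* offsetof(__last) is one slot past the last register, so the */
+	/* valid maximum is offsetof(__last) - sizeof(unsigned long).  */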
+
+Fixes: 40e084a506eb ("MIPS: Add uprobes support.")
+Suggested-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/ptrace.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
+index 85fa9962266a2..ef72c46b55688 100644
+--- a/arch/mips/include/asm/ptrace.h
++++ b/arch/mips/include/asm/ptrace.h
+@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
+
+ /* Query offset/name of register from its name/offset */
+ extern int regs_query_register_offset(const char *name);
+-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
++#define MAX_REG_OFFSET \
++ (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
+
+ /**
+ * regs_get_register() - get register value from its offset
+--
+2.39.5
+
--- /dev/null
+From cb6baa1ad081c633d93b3e90eacd40369466a464 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 3 Apr 2025 18:11:43 +0200
+Subject: MIPS: Move r4k_wait() to .cpuidle.text section
+
+From: Marco Crivellari <marco.crivellari@suse.com>
+
+[ Upstream commit b713f27e32d87c35737ec942dd6f5ed6b7475f48 ]
+
+Fix missing .cpuidle.text section assignment for r4k_wait() to correct
+backtracing with nmi_backtrace().
+
+Fixes: 97c8580e85cf ("MIPS: Annotate cpu_wait implementations with __cpuidle")
+Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Acked-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/kernel/genex.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
+index 46d975d00298d..2cf312d9a3b09 100644
+--- a/arch/mips/kernel/genex.S
++++ b/arch/mips/kernel/genex.S
+@@ -104,6 +104,7 @@ handle_vcei:
+
+ __FINIT
+
++ .section .cpuidle.text,"ax"
+ /* Align to 32 bytes for the maximum idle interrupt region size. */
+ .align 5
+ LEAF(r4k_wait)
+--
+2.39.5
+
--- /dev/null
+From 50b7daa89e1eb13767b3e90284702ac79ea34c2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 May 2025 10:58:00 +0200
+Subject: nvme: unblock ctrl state transition for firmware update
+
+From: Daniel Wagner <wagi@kernel.org>
+
+[ Upstream commit 650415fca0a97472fdd79725e35152614d1aad76 ]
+
+The original nvme subsystem design didn't have a CONNECTING state; the
+state machine allowed transitions from RESETTING to LIVE directly.
+
+With the introduction of nvme fabrics the CONNECTING state was
+introduced. Over time nvme-pci started to use the CONNECTING state as
+well.
+
+Eventually, a bug fix for nvme-fc started to depend on the fact that the
+only valid transition to LIVE was from CONNECTING. However, this change
+didn't update the firmware update handler, which was still depending on
+the RESETTING to LIVE transition.
+
+The simplest way to address it for the time being is to switch into
+CONNECTING state before going to LIVE state.
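+
+Note the short-circuit in the resulting code: if the RESETTING ->
+CONNECTING transition fails, the CONNECTING -> LIVE transition is never
+attempted:
+
+	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
+	    !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
+		return;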
+
+Fixes: d2fe192348f9 ("nvme: only allow entering LIVE from CONNECTING state")
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Daniel Wagner <wagi@kernel.org>
+Closes: https://lore.kernel.org/all/0134ea15-8d5f-41f7-9e9a-d7e6d82accaa@roeck-us.net
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 150de63b26b2c..a27149e37a988 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4492,7 +4492,8 @@ static void nvme_fw_act_work(struct work_struct *work)
+ msleep(100);
+ }
+
+- if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
++ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
++ !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
+ return;
+
+ nvme_unquiesce_io_queues(ctrl);
+--
+2.39.5
+
--- /dev/null
+From 8ac311bfaf054e034afd8423a2002e81571040e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 07:52:18 -0700
+Subject: riscv: Disallow PR_GET_TAGGED_ADDR_CTRL without Supm
+
+From: Samuel Holland <samuel.holland@sifive.com>
+
+[ Upstream commit 7f1c3de1370bc6a8ad5157336b258067dac0ae9c ]
+
+When the prctl() interface for pointer masking was added, it did not
+check that the pointer masking ISA extension was supported, only the
+individual submodes. Userspace could still attempt to disable pointer
+masking and query the pointer masking state. commit 81de1afb2dd1
+("riscv: Fix kernel crash due to PR_SET_TAGGED_ADDR_CTRL") disallowed
+the former, as the senvcfg write could crash on older systems.
+PR_GET_TAGGED_ADDR_CTRL state does not crash, because it reads only
+kernel-internal state and not senvcfg, but it should still be disallowed
+for consistency.
+
+Fixes: 09d6775f503b ("riscv: Add support for userspace pointer masking")
+Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
+Reviewed-by: Nam Cao <namcao@linutronix.de>
+Link: https://lore.kernel.org/r/20250507145230.2272871-1-samuel.holland@sifive.com
+Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/process.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
+index 3db2c0c07acd0..15d8f75902f85 100644
+--- a/arch/riscv/kernel/process.c
++++ b/arch/riscv/kernel/process.c
+@@ -333,6 +333,9 @@ long get_tagged_addr_ctrl(struct task_struct *task)
+ struct thread_info *ti = task_thread_info(task);
+ long ret = 0;
+
++ if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
++ return -EINVAL;
++
+ if (is_compat_thread(ti))
+ return -EINVAL;
+
+--
+2.39.5
+
--- /dev/null
+From fef354f790cae05caa33c95ca3fe926fdd65f9f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Apr 2025 15:38:49 +0800
+Subject: riscv: misaligned: Add handling for ZCB instructions
+
+From: Nylon Chen <nylon.chen@sifive.com>
+
+[ Upstream commit eb16b3727c05ed36420c90eca1e8f0e279514c1c ]
+
+Add support for the Zcb extension's compressed half-word instructions
+(C.LHU, C.LH, and C.SH) in the RISC-V misaligned access trap handler.
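+
+Each instruction is recognized with the usual mask/match pattern, using
+the constants added in the diff below:
+
+	if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
+		len = 2;			/* half-word access */
+		insn = RVC_RS2S(insn) << SH_RD;	/* map rs2' into rd */
+	}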
+
+Signed-off-by: Zong Li <zong.li@sifive.com>
+Signed-off-by: Nylon Chen <nylon.chen@sifive.com>
+Fixes: 956d705dd279 ("riscv: Unaligned load/store handling for M_MODE")
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20250411073850.3699180-2-nylon.chen@sifive.com
+Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/traps_misaligned.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index 4354c87c0376f..dde5d11dc1b50 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -88,6 +88,13 @@
+ #define INSN_MATCH_C_FSWSP 0xe002
+ #define INSN_MASK_C_FSWSP 0xe003
+
++#define INSN_MATCH_C_LHU 0x8400
++#define INSN_MASK_C_LHU 0xfc43
++#define INSN_MATCH_C_LH 0x8440
++#define INSN_MASK_C_LH 0xfc43
++#define INSN_MATCH_C_SH 0x8c00
++#define INSN_MASK_C_SH 0xfc43
++
+ #define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)
+
+ #if defined(CONFIG_64BIT)
+@@ -431,6 +438,13 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
+ fp = 1;
+ len = 4;
+ #endif
++ } else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
++ len = 2;
++ insn = RVC_RS2S(insn) << SH_RD;
++ } else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
++ len = 2;
++ shift = 8 * (sizeof(ulong) - len);
++ insn = RVC_RS2S(insn) << SH_RD;
+ } else {
+ regs->epc = epc;
+ return -1;
+@@ -530,6 +544,9 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs)
+ len = 4;
+ val.data_ulong = GET_F32_RS2C(insn, regs);
+ #endif
++ } else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
++ len = 2;
++ val.data_ulong = GET_RS2S(insn, regs);
+ } else {
+ regs->epc = epc;
+ return -1;
+--
+2.39.5
+
--- /dev/null
+From b0de9058b3ceefa88174ecc401512b9bcf9d9076 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Apr 2025 18:23:09 +0200
+Subject: riscv: misaligned: enable IRQs while handling misaligned accesses
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Clément Léger <cleger@rivosinc.com>
+
+[ Upstream commit 453805f0a28fc5091e46145e6560c776f7c7a611 ]
+
+We can safely reenable IRQs when coming from userspace. This allows
+accessing user memory that could potentially trigger a page fault.
+
+Fixes: b686ecdeacf6 ("riscv: misaligned: Restrict user access to kernel memory")
+Signed-off-by: Clément Léger <cleger@rivosinc.com>
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20250422162324.956065-3-cleger@rivosinc.com
+Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/traps.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index b1d991c78a233..9c83848797a78 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -220,19 +220,23 @@ static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type
+ {
+ irqentry_state_t state;
+
+- if (user_mode(regs))
++ if (user_mode(regs)) {
+ irqentry_enter_from_user_mode(regs);
+- else
++ local_irq_enable();
++ } else {
+ state = irqentry_nmi_enter(regs);
++ }
+
+ if (misaligned_handler[type].handler(regs))
+ do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+ misaligned_handler[type].type_str);
+
+- if (user_mode(regs))
++ if (user_mode(regs)) {
++ local_irq_disable();
+ irqentry_exit_to_user_mode(regs);
+- else
++ } else {
+ irqentry_nmi_exit(regs, state);
++ }
+ }
+
+ asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+--
+2.39.5
+
--- /dev/null
+From 79311fd9c897908054cfeafe2b8d496cc25c047b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Apr 2025 18:23:08 +0200
+Subject: riscv: misaligned: factorize trap handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Clément Léger <cleger@rivosinc.com>
+
+[ Upstream commit fd94de9f9e7aac11ec659e386b9db1203d502023 ]
+
+Since both load/store and user/kernel should use almost the same path,
+and since we are going to add some code around it, factorize the trap
+handling.
+
+Signed-off-by: Clément Léger <cleger@rivosinc.com>
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20250422162324.956065-2-cleger@rivosinc.com
+Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Stable-dep-of: 453805f0a28f ("riscv: misaligned: enable IRQs while handling misaligned accesses")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/traps.c | 66 +++++++++++++++++++++------------------
+ 1 file changed, 36 insertions(+), 30 deletions(-)
+
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index 8ff8e8b36524b..b1d991c78a233 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -198,47 +198,53 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
+ DO_ERROR_INFO(do_trap_load_fault,
+ SIGSEGV, SEGV_ACCERR, "load access fault");
+
+-asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
++enum misaligned_access_type {
++ MISALIGNED_STORE,
++ MISALIGNED_LOAD,
++};
++static const struct {
++ const char *type_str;
++ int (*handler)(struct pt_regs *regs);
++} misaligned_handler[] = {
++ [MISALIGNED_STORE] = {
++ .type_str = "Oops - store (or AMO) address misaligned",
++ .handler = handle_misaligned_store,
++ },
++ [MISALIGNED_LOAD] = {
++ .type_str = "Oops - load address misaligned",
++ .handler = handle_misaligned_load,
++ },
++};
++
++static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
+ {
+- if (user_mode(regs)) {
++ irqentry_state_t state;
++
++ if (user_mode(regs))
+ irqentry_enter_from_user_mode(regs);
++ else
++ state = irqentry_nmi_enter(regs);
+
+- if (handle_misaligned_load(regs))
+- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+- "Oops - load address misaligned");
++ if (misaligned_handler[type].handler(regs))
++ do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
++ misaligned_handler[type].type_str);
+
++ if (user_mode(regs))
+ irqentry_exit_to_user_mode(regs);
+- } else {
+- irqentry_state_t state = irqentry_nmi_enter(regs);
+-
+- if (handle_misaligned_load(regs))
+- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+- "Oops - load address misaligned");
+-
++ else
+ irqentry_nmi_exit(regs, state);
+- }
+ }
+
+-asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
++asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+ {
+- if (user_mode(regs)) {
+- irqentry_enter_from_user_mode(regs);
+-
+- if (handle_misaligned_store(regs))
+- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+- "Oops - store (or AMO) address misaligned");
+-
+- irqentry_exit_to_user_mode(regs);
+- } else {
+- irqentry_state_t state = irqentry_nmi_enter(regs);
+-
+- if (handle_misaligned_store(regs))
+- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+- "Oops - store (or AMO) address misaligned");
++ do_trap_misaligned(regs, MISALIGNED_LOAD);
++}
+
+- irqentry_nmi_exit(regs, state);
+- }
++asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
++{
++ do_trap_misaligned(regs, MISALIGNED_STORE);
+ }
++
+ DO_ERROR_INFO(do_trap_store_fault,
+ SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
+ DO_ERROR_INFO(do_trap_ecall_s,
+--
+2.39.5
+
usb-usbtmc-fix-erroneous-get_stb-ioctl-error-returns.patch
usb-usbtmc-fix-erroneous-wait_srq-ioctl-return.patch
usb-usbtmc-fix-erroneous-generic_read-ioctl-return.patch
+iio-imu-bmi270-fix-initial-sampling-frequency-config.patch
+iio-accel-adxl367-fix-setting-odr-for-activity-time-.patch
+iio-temp-maxim-thermocouple-fix-potential-lack-of-dm.patch
+iio-accel-adxl355-make-timestamp-64-bit-aligned-usin.patch
+iio-adc-dln2-use-aligned_s64-for-timestamp.patch
+mips-fix-idle-vs-timer-enqueue.patch
+mips-move-r4k_wait-to-.cpuidle.text-section.patch
+timekeeping-prevent-coarse-clocks-going-backwards.patch
+accel-ivpu-separate-db-id-and-cmdq-id-allocations-fr.patch
+accel-ivpu-correct-mutex-unlock-order-in-job-submiss.patch
+mips-fix-max_reg_offset.patch
+riscv-misaligned-add-handling-for-zcb-instructions.patch
+loop-factor-out-a-loop_assign_backing_file-helper.patch
+loop-add-sanity-check-for-read-write_iter.patch
+drm-panel-simple-update-timings-for-auo-g101evn010.patch
+nvme-unblock-ctrl-state-transition-for-firmware-upda.patch
+riscv-misaligned-factorize-trap-handling.patch
+riscv-misaligned-enable-irqs-while-handling-misalign.patch
+riscv-disallow-pr_get_tagged_addr_ctrl-without-supm.patch
+drm-xe-tests-mocs-hold-xe_forcewake_all-for-lncf-reg.patch
+drm-xe-release-force-wake-first-then-runtime-power.patch
+io_uring-sqpoll-increase-task_work-submission-batch-.patch
+do_umount-add-missing-barrier-before-refcount-checks.patch
--- /dev/null
+From 3782567eba8f8237831235c187a211c6fe8628e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Apr 2025 22:46:52 -0700
+Subject: timekeeping: Prevent coarse clocks going backwards
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit b71f9804f66c2592d4c3a2397b7374a4039005a5 ]
+
+Lei Chen raised an issue with CLOCK_MONOTONIC_COARSE seeing time
+inconsistencies. Lei tracked down that this was being caused by the
+adjustment:
+
+ tk->tkr_mono.xtime_nsec -= offset;
+
+which compensates for the unaccumulated cycles in offset when the
+multiplier is adjusted forward, so that the non-_COARSE clockids don't
+see inconsistencies.
+
+However, the _COARSE clockid getter functions use the adjusted xtime_nsec
+value directly and do not compensate for the negative offset via the
+clocksource delta multiplied by the new multiplier. In that case the
+caller can observe time going backwards in consecutive calls.
+
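+To make that failure mode concrete, here is a minimal user-space sketch
+of a watcher that would trip on such an inconsistency (hypothetical, not
+taken from Lei's report):
+
+  #include <stdio.h>
+  #include <time.h>
+
+  int main(void)
+  {
+          struct timespec prev, now;
+
+          /* Poll the coarse clock; it must never move backwards. */
+          clock_gettime(CLOCK_MONOTONIC_COARSE, &prev);
+          for (;;) {
+                  clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
+                  if (now.tv_sec < prev.tv_sec ||
+                      (now.tv_sec == prev.tv_sec && now.tv_nsec < prev.tv_nsec)) {
+                          printf("backwards: %lld.%09ld -> %lld.%09ld\n",
+                                 (long long)prev.tv_sec, prev.tv_nsec,
+                                 (long long)now.tv_sec, now.tv_nsec);
+                          return 1;
+                  }
+                  prev = now;
+          }
+  }
+
+Running such a loop while NTP-style frequency adjustments are applied
+elsewhere (e.g. via adjtimex()) is the scenario in which the backwards
+step can show up.
+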
+By design, this negative adjustment should be fine, because the logic
+invoked from timekeeping_adjust() runs after approximately
+
+   multiplier * interval_cycles
+
+has been accumulated into xtime_nsec. The accumulated value is always
+larger than the
+
+   mult_adj * offset
+
+value, which is subtracted from xtime_nsec. Both operations are done
+together under the tk_core.lock, so the net change to xtime_nsec is
+always positive.
+
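+As a sketch with made-up numbers (only the relations, not the values,
+mirror the argument above):
+
+  #include <assert.h>
+  #include <inttypes.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          /* Illustrative values only; in the kernel these correspond to
+           * tkr_mono.mult, cycle_interval and the remaining offset. */
+          uint64_t mult = 4000000;          /* clocksource multiplier */
+          uint64_t interval_cycles = 10000; /* cycles per accumulated interval */
+          uint64_t offset = 9999;           /* remainder, < interval_cycles */
+          uint64_t mult_adj = 1;            /* small NTP multiplier tweak */
+
+          uint64_t accumulated = mult * interval_cycles; /* added to xtime_nsec */
+          uint64_t subtracted = mult_adj * offset;       /* subtracted afterwards */
+
+          assert(accumulated > subtracted); /* net change stays positive */
+          printf("net: +%" PRIu64 "\n", accumulated - subtracted);
+          return 0;
+  }
+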
+However, do_adjtimex() calls into timekeeping_advance() as well, to
+apply the NTP frequency adjustment immediately. In this case
+timekeeping_advance() does not return early when the offset is smaller
+than interval_cycles, so no time is accumulated into xtime_nsec. But
+the subsequent call into timekeeping_adjust(), which modifies the
+multiplier, still subtracts from xtime_nsec to correct for the new
+multiplier.
+
+Because there was no accumulation, xtime_nsec becomes smaller than
+before, which opens a window up to the next accumulation during which
+the _COARSE clockid getters, which don't compensate for the offset,
+can observe the inconsistency.
+
+An attempt was made to fix this by forwarding the timekeeper whenever
+adjtimex() adjusts the multiplier, which resets the offset to zero:
+
+ 757b000f7b93 ("timekeeping: Fix possible inconsistencies in _COARSE clockids")
+
+That works correctly, but unfortunately causes a regression on the
+adjtimex() side. There are two issues:
+
+ 1) The forwarding of the base time moves the update out of the original
+ period and establishes a new one.
+
+ 2) The clearing of the accumulated NTP error is changing the behaviour as
+ well.
+
+User space expects multiplier/frequency updates to be in effect when the
+syscall returns, so delaying the update to the next tick does not solve
+the problem either.
+
+Commit 757b000f7b93 was reverted so that the established expectations of
+user space implementations (ntpd, chronyd) are restored, but that obviously
+brought the inconsistencies back.
+
+One of the initial approaches to fix this was to establish separate
+storage for the coarse time getters' nanoseconds part, calculated from
+the offset. That was dropped because not having yet another piece of
+state to maintain was simpler. But given the result of the above
+exercise, that solution turns out to be the right one after all. Bring
+it back in a slightly modified form.
+
+Thus introduce timekeeper::coarse_nsec, store that nanoseconds part in
+it, and switch the time getter functions and the VDSO update to use that
+value. coarse_nsec is set on operations which forward or initialize the
+timekeeper and after time has been accumulated during a tick. If there
+is no accumulation, the timestamp is unchanged.
+
+This leaves the adjtimex() behaviour unmodified and prevents coarse time
+from going backwards.
+
+[ jstultz: Simplified the coarse_nsec calculation and kept behavior so
+ coarse clockids aren't adjusted on each inter-tick adjtimex
+ call, slightly reworked the comments and commit message ]
+
+Fixes: da15cfdae033 ("time: Introduce CLOCK_REALTIME_COARSE")
+Reported-by: Lei Chen <lei.chen@smartx.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Stultz <jstultz@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/all/20250419054706.2319105-1-jstultz@google.com
+Closes: https://lore.kernel.org/lkml/20250310030004.3705801-1-lei.chen@smartx.com/
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/timekeeper_internal.h | 8 +++--
+ kernel/time/timekeeping.c | 50 ++++++++++++++++++++++++-----
+ kernel/time/vsyscall.c | 4 +--
+ 3 files changed, 49 insertions(+), 13 deletions(-)
+
+diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
+index e39d4d563b197..785048a3b3e60 100644
+--- a/include/linux/timekeeper_internal.h
++++ b/include/linux/timekeeper_internal.h
+@@ -51,7 +51,7 @@ struct tk_read_base {
+ * @offs_real: Offset clock monotonic -> clock realtime
+ * @offs_boot: Offset clock monotonic -> clock boottime
+ * @offs_tai: Offset clock monotonic -> clock tai
+- * @tai_offset: The current UTC to TAI offset in seconds
++ * @coarse_nsec: The nanoseconds part for coarse time getters
+ * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
+ * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
+ * @clock_was_set_seq: The sequence number of clock was set events
+@@ -76,6 +76,7 @@ struct tk_read_base {
+ * ntp shifted nano seconds.
+ * @ntp_err_mult: Multiplication factor for scaled math conversion
+ * @skip_second_overflow: Flag used to avoid updating NTP twice with same second
++ * @tai_offset: The current UTC to TAI offset in seconds
+ *
+ * Note: For timespec(64) based interfaces wall_to_monotonic is what
+ * we need to add to xtime (or xtime corrected for sub jiffy times)
+@@ -100,7 +101,7 @@ struct tk_read_base {
+ * which results in the following cacheline layout:
+ *
+ * 0: seqcount, tkr_mono
+- * 1: xtime_sec ... tai_offset
++ * 1: xtime_sec ... coarse_nsec
+ * 2: tkr_raw, raw_sec
+ * 3,4: Internal variables
+ *
+@@ -121,7 +122,7 @@ struct timekeeper {
+ ktime_t offs_real;
+ ktime_t offs_boot;
+ ktime_t offs_tai;
+- s32 tai_offset;
++ u32 coarse_nsec;
+
+ /* Cacheline 2: */
+ struct tk_read_base tkr_raw;
+@@ -144,6 +145,7 @@ struct timekeeper {
+ u32 ntp_error_shift;
+ u32 ntp_err_mult;
+ u32 skip_second_overflow;
++ s32 tai_offset;
+ };
+
+ #ifdef CONFIG_GENERIC_TIME_VSYSCALL
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 1e67d076f1955..a009c91f7b05f 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -164,10 +164,34 @@ static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
+ return ts;
+ }
+
++static inline struct timespec64 tk_xtime_coarse(const struct timekeeper *tk)
++{
++ struct timespec64 ts;
++
++ ts.tv_sec = tk->xtime_sec;
++ ts.tv_nsec = tk->coarse_nsec;
++ return ts;
++}
++
++/*
++ * Update the nanoseconds part for the coarse time keepers. They can't rely
++ * on xtime_nsec because xtime_nsec could be adjusted by a small negative
++ * amount when the multiplication factor of the clock is adjusted, which
++ * could cause the coarse clocks to go slightly backwards. See
++ * timekeeping_apply_adjustment(). Thus we keep a separate copy for the coarse
++ * clockids which is only updated when the clock has been set or we have
++ * accumulated time.
++ */
++static inline void tk_update_coarse_nsecs(struct timekeeper *tk)
++{
++ tk->coarse_nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
++}
++
+ static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
+ {
+ tk->xtime_sec = ts->tv_sec;
+ tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
++ tk_update_coarse_nsecs(tk);
+ }
+
+ static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
+@@ -175,6 +199,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
+ tk->xtime_sec += ts->tv_sec;
+ tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
+ tk_normalize_xtime(tk);
++ tk_update_coarse_nsecs(tk);
+ }
+
+ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
+@@ -708,6 +733,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
+ tk_normalize_xtime(tk);
+ delta -= incr;
+ }
++ tk_update_coarse_nsecs(tk);
+ }
+
+ /**
+@@ -804,8 +830,8 @@ EXPORT_SYMBOL_GPL(ktime_get_with_offset);
+ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
+ {
+ struct timekeeper *tk = &tk_core.timekeeper;
+- unsigned int seq;
+ ktime_t base, *offset = offsets[offs];
++ unsigned int seq;
+ u64 nsecs;
+
+ WARN_ON(timekeeping_suspended);
+@@ -813,7 +839,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
+ do {
+ seq = read_seqcount_begin(&tk_core.seq);
+ base = ktime_add(tk->tkr_mono.base, *offset);
+- nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
++ nsecs = tk->coarse_nsec;
+
+ } while (read_seqcount_retry(&tk_core.seq, seq));
+
+@@ -2161,7 +2187,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
+ struct timekeeper *real_tk = &tk_core.timekeeper;
+ unsigned int clock_set = 0;
+ int shift = 0, maxshift;
+- u64 offset;
++ u64 offset, orig_offset;
+
+ guard(raw_spinlock_irqsave)(&tk_core.lock);
+
+@@ -2172,7 +2198,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
+ offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
+ tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
+ tk->tkr_mono.clock->max_raw_delta);
+-
++ orig_offset = offset;
+ /* Check if there's really nothing to do */
+ if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
+ return false;
+@@ -2205,6 +2231,14 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
+ */
+ clock_set |= accumulate_nsecs_to_secs(tk);
+
++ /*
++	 * To avoid inconsistencies caused by adjtimex TK_ADV_FREQ calls
++ * making small negative adjustments to the base xtime_nsec
++ * value, only update the coarse clocks if we accumulated time
++ */
++ if (orig_offset != offset)
++ tk_update_coarse_nsecs(tk);
++
+ timekeeping_update_from_shadow(&tk_core, clock_set);
+
+ return !!clock_set;
+@@ -2248,7 +2282,7 @@ void ktime_get_coarse_real_ts64(struct timespec64 *ts)
+ do {
+ seq = read_seqcount_begin(&tk_core.seq);
+
+- *ts = tk_xtime(tk);
++ *ts = tk_xtime_coarse(tk);
+ } while (read_seqcount_retry(&tk_core.seq, seq));
+ }
+ EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
+@@ -2271,7 +2305,7 @@ void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts)
+
+ do {
+ seq = read_seqcount_begin(&tk_core.seq);
+- *ts = tk_xtime(tk);
++ *ts = tk_xtime_coarse(tk);
+ offset = tk_core.timekeeper.offs_real;
+ } while (read_seqcount_retry(&tk_core.seq, seq));
+
+@@ -2350,12 +2384,12 @@ void ktime_get_coarse_ts64(struct timespec64 *ts)
+ do {
+ seq = read_seqcount_begin(&tk_core.seq);
+
+- now = tk_xtime(tk);
++ now = tk_xtime_coarse(tk);
+ mono = tk->wall_to_monotonic;
+ } while (read_seqcount_retry(&tk_core.seq, seq));
+
+ set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
+- now.tv_nsec + mono.tv_nsec);
++ now.tv_nsec + mono.tv_nsec);
+ }
+ EXPORT_SYMBOL(ktime_get_coarse_ts64);
+
+diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
+index 05d3831431658..c9d946b012d8b 100644
+--- a/kernel/time/vsyscall.c
++++ b/kernel/time/vsyscall.c
+@@ -97,12 +97,12 @@ void update_vsyscall(struct timekeeper *tk)
+ /* CLOCK_REALTIME_COARSE */
+ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
+ vdso_ts->sec = tk->xtime_sec;
+- vdso_ts->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
++ vdso_ts->nsec = tk->coarse_nsec;
+
+ /* CLOCK_MONOTONIC_COARSE */
+ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
+ vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+- nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
++ nsec = tk->coarse_nsec;
+ nsec = nsec + tk->wall_to_monotonic.tv_nsec;
+ vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
+
+--
+2.39.5
+