6.14-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 7 May 2025 09:26:05 +0000 (11:26 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 7 May 2025 09:26:05 +0000 (11:26 +0200)
added patches:
accel-ivpu-abort-all-jobs-after-command-queue-unregister.patch
accel-ivpu-add-handling-of-vpu_jsm_status_mvnci_context_violation_hw.patch
accel-ivpu-fix-locking-order-in-ivpu_job_submit.patch
drm-xe-invalidate-l3-read-only-cachelines-for-geometry-streams-too.patch
mm-slab-clean-up-slab-obj_exts-always.patch

queue-6.14/accel-ivpu-abort-all-jobs-after-command-queue-unregister.patch [new file with mode: 0644]
queue-6.14/accel-ivpu-add-handling-of-vpu_jsm_status_mvnci_context_violation_hw.patch [new file with mode: 0644]
queue-6.14/accel-ivpu-fix-locking-order-in-ivpu_job_submit.patch [new file with mode: 0644]
queue-6.14/drm-xe-invalidate-l3-read-only-cachelines-for-geometry-streams-too.patch [new file with mode: 0644]
queue-6.14/mm-slab-clean-up-slab-obj_exts-always.patch [new file with mode: 0644]
queue-6.14/series

diff --git a/queue-6.14/accel-ivpu-abort-all-jobs-after-command-queue-unregister.patch b/queue-6.14/accel-ivpu-abort-all-jobs-after-command-queue-unregister.patch
new file mode 100644 (file)
index 0000000..195f949
--- /dev/null
@@ -0,0 +1,352 @@
+From 5bbccadaf33eea2b879d8326ad59ae0663be47d1 Mon Sep 17 00:00:00 2001
+From: Karol Wachowski <karol.wachowski@intel.com>
+Date: Tue, 7 Jan 2025 18:32:26 +0100
+Subject: accel/ivpu: Abort all jobs after command queue unregister
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+commit 5bbccadaf33eea2b879d8326ad59ae0663be47d1 upstream.
+
+With the hardware scheduler, JOB_DONE notifications are not expected from
+the NPU FW for jobs aborted by the command queue destroy JSM command.
+
+Remove jobs submitted to an unregistered command queue from submitted_jobs_xa
+to avoid triggering a TDR in that case.
+
+Add an explicit submitted_jobs_lock that protects access to the list of
+submitted jobs, which is now used to find jobs to abort.
+
+Move the context abort procedure to a separate work queue so that it does not
+slow down handling of IPCs or DCT requests when a job abort takes longer,
+especially when destruction of the last job of a specific context results
+in context release.
+
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-4-maciej.falkowski@linux.intel.com
+[ This backport removes all the lines from upstream commit related to the
+  command queue UAPI, as it is not present in the 6.14 kernel and should
+  not be backported. ]
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_drv.c   |   32 ++-------------
+ drivers/accel/ivpu/ivpu_drv.h   |    2 
+ drivers/accel/ivpu/ivpu_job.c   |   85 +++++++++++++++++++++++++++++++---------
+ drivers/accel/ivpu/ivpu_job.h   |    1 
+ drivers/accel/ivpu/ivpu_mmu.c   |    3 -
+ drivers/accel/ivpu/ivpu_sysfs.c |    5 +-
+ 6 files changed, 79 insertions(+), 49 deletions(-)
+
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -36,8 +36,6 @@
+ #define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
+ #endif
+-static struct lock_class_key submitted_jobs_xa_lock_class_key;
+-
+ int ivpu_dbg_mask;
+ module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
+ MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
+@@ -465,26 +463,6 @@ static const struct drm_driver driver =
+       .major = 1,
+ };
+-static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
+-{
+-      struct ivpu_file_priv *file_priv;
+-      unsigned long ctx_id;
+-
+-      mutex_lock(&vdev->context_list_lock);
+-
+-      xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+-              if (!file_priv->has_mmu_faults || file_priv->aborted)
+-                      continue;
+-
+-              mutex_lock(&file_priv->lock);
+-              ivpu_context_abort_locked(file_priv);
+-              file_priv->aborted = true;
+-              mutex_unlock(&file_priv->lock);
+-      }
+-
+-      mutex_unlock(&vdev->context_list_lock);
+-}
+-
+ static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
+ {
+       struct ivpu_device *vdev = arg;
+@@ -498,9 +476,6 @@ static irqreturn_t ivpu_irq_thread_handl
+               case IVPU_HW_IRQ_SRC_IPC:
+                       ivpu_ipc_irq_thread_handler(vdev);
+                       break;
+-              case IVPU_HW_IRQ_SRC_MMU_EVTQ:
+-                      ivpu_context_abort_invalid(vdev);
+-                      break;
+               case IVPU_HW_IRQ_SRC_DCT:
+                       ivpu_pm_dct_irq_thread_handler(vdev);
+                       break;
+@@ -617,16 +592,21 @@ static int ivpu_dev_init(struct ivpu_dev
+       xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+       xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
+       xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
+-      lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
+       INIT_LIST_HEAD(&vdev->bo_list);
+       vdev->db_limit.min = IVPU_MIN_DB;
+       vdev->db_limit.max = IVPU_MAX_DB;
++      INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_thread_handler);
++
+       ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
+       if (ret)
+               goto err_xa_destroy;
++      ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
++      if (ret)
++              goto err_xa_destroy;
++
+       ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
+       if (ret)
+               goto err_xa_destroy;
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -137,6 +137,7 @@ struct ivpu_device {
+       struct mutex context_list_lock; /* Protects user context addition/removal */
+       struct xarray context_xa;
+       struct xa_limit context_xa_limit;
++      struct work_struct context_abort_work;
+       struct xarray db_xa;
+       struct xa_limit db_limit;
+@@ -145,6 +146,7 @@ struct ivpu_device {
+       struct mutex bo_list_lock; /* Protects bo_list */
+       struct list_head bo_list;
++      struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
+       struct xarray submitted_jobs_xa;
+       struct ivpu_ipc_consumer job_done_consumer;
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -223,7 +223,8 @@ static int ivpu_cmdq_fini(struct ivpu_fi
+       if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+               ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
+               if (!ret)
+-                      ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
++                      ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
++                               cmdq->id, file_priv->ctx.id);
+       }
+       ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
+@@ -324,6 +325,8 @@ void ivpu_context_abort_locked(struct iv
+       if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
+               ivpu_jsm_context_release(vdev, file_priv->ctx.id);
++
++      file_priv->aborted = true;
+ }
+ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+@@ -462,16 +465,14 @@ static struct ivpu_job *ivpu_job_remove_
+ {
+       struct ivpu_job *job;
+-      xa_lock(&vdev->submitted_jobs_xa);
+-      job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
++      lockdep_assert_held(&vdev->submitted_jobs_lock);
++      job = xa_erase(&vdev->submitted_jobs_xa, job_id);
+       if (xa_empty(&vdev->submitted_jobs_xa) && job) {
+               vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
+                                           vdev->busy_time);
+       }
+-      xa_unlock(&vdev->submitted_jobs_xa);
+-
+       return job;
+ }
+@@ -479,6 +480,8 @@ static int ivpu_job_signal_and_destroy(s
+ {
+       struct ivpu_job *job;
++      lockdep_assert_held(&vdev->submitted_jobs_lock);
++
+       job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
+       if (!job)
+               return -ENOENT;
+@@ -497,6 +500,10 @@ static int ivpu_job_signal_and_destroy(s
+       ivpu_stop_job_timeout_detection(vdev);
+       ivpu_rpm_put(vdev);
++
++      if (!xa_empty(&vdev->submitted_jobs_xa))
++              ivpu_start_job_timeout_detection(vdev);
++
+       return 0;
+ }
+@@ -505,8 +512,12 @@ void ivpu_jobs_abort_all(struct ivpu_dev
+       struct ivpu_job *job;
+       unsigned long id;
++      mutex_lock(&vdev->submitted_jobs_lock);
++
+       xa_for_each(&vdev->submitted_jobs_xa, id, job)
+               ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
++
++      mutex_unlock(&vdev->submitted_jobs_lock);
+ }
+ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+@@ -531,15 +542,16 @@ static int ivpu_job_submit(struct ivpu_j
+               goto err_unlock_file_priv;
+       }
+-      xa_lock(&vdev->submitted_jobs_xa);
++      mutex_lock(&vdev->submitted_jobs_lock);
++
+       is_first_job = xa_empty(&vdev->submitted_jobs_xa);
+-      ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+-                              &file_priv->job_id_next, GFP_KERNEL);
++      ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
++                            &file_priv->job_id_next, GFP_KERNEL);
+       if (ret < 0) {
+               ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+                        file_priv->ctx.id);
+               ret = -EBUSY;
+-              goto err_unlock_submitted_jobs_xa;
++              goto err_unlock_submitted_jobs;
+       }
+       ret = ivpu_cmdq_push_job(cmdq, job);
+@@ -562,19 +574,21 @@ static int ivpu_job_submit(struct ivpu_j
+                job->job_id, file_priv->ctx.id, job->engine_idx, priority,
+                job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+-      xa_unlock(&vdev->submitted_jobs_xa);
+-
++      mutex_unlock(&vdev->submitted_jobs_lock);
+       mutex_unlock(&file_priv->lock);
+-      if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
++      if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
++              mutex_lock(&vdev->submitted_jobs_lock);
+               ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
++              mutex_unlock(&vdev->submitted_jobs_lock);
++      }
+       return 0;
+ err_erase_xa:
+-      __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+-err_unlock_submitted_jobs_xa:
+-      xa_unlock(&vdev->submitted_jobs_xa);
++      xa_erase(&vdev->submitted_jobs_xa, job->job_id);
++err_unlock_submitted_jobs:
++      mutex_unlock(&vdev->submitted_jobs_lock);
+ err_unlock_file_priv:
+       mutex_unlock(&file_priv->lock);
+       ivpu_rpm_put(vdev);
+@@ -745,7 +759,6 @@ ivpu_job_done_callback(struct ivpu_devic
+                      struct vpu_jsm_msg *jsm_msg)
+ {
+       struct vpu_ipc_msg_payload_job_done *payload;
+-      int ret;
+       if (!jsm_msg) {
+               ivpu_err(vdev, "IPC message has no JSM payload\n");
+@@ -758,9 +771,10 @@ ivpu_job_done_callback(struct ivpu_devic
+       }
+       payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
+-      ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+-      if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
+-              ivpu_start_job_timeout_detection(vdev);
++
++      mutex_lock(&vdev->submitted_jobs_lock);
++      ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
++      mutex_unlock(&vdev->submitted_jobs_lock);
+ }
+ void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
+@@ -773,3 +787,36 @@ void ivpu_job_done_consumer_fini(struct
+ {
+       ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
+ }
++
++void ivpu_context_abort_thread_handler(struct work_struct *work)
++{
++      struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
++      struct ivpu_file_priv *file_priv;
++      unsigned long ctx_id;
++      struct ivpu_job *job;
++      unsigned long id;
++
++      mutex_lock(&vdev->context_list_lock);
++      xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
++              if (!file_priv->has_mmu_faults || file_priv->aborted)
++                      continue;
++
++              mutex_lock(&file_priv->lock);
++              ivpu_context_abort_locked(file_priv);
++              mutex_unlock(&file_priv->lock);
++      }
++      mutex_unlock(&vdev->context_list_lock);
++
++      if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
++              return;
++      /*
++       * In hardware scheduling mode NPU already has stopped processing jobs
++       * and won't send us any further notifications, thus we have to free job related resources
++       * and notify userspace
++       */
++      mutex_lock(&vdev->submitted_jobs_lock);
++      xa_for_each(&vdev->submitted_jobs_xa, id, job)
++              if (job->file_priv->aborted)
++                      ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
++      mutex_unlock(&vdev->submitted_jobs_lock);
++}
+--- a/drivers/accel/ivpu/ivpu_job.h
++++ b/drivers/accel/ivpu/ivpu_job.h
+@@ -66,6 +66,7 @@ void ivpu_cmdq_reset_all_contexts(struct
+ void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
+ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
++void ivpu_context_abort_thread_handler(struct work_struct *work);
+ void ivpu_jobs_abort_all(struct ivpu_device *vdev);
+--- a/drivers/accel/ivpu/ivpu_mmu.c
++++ b/drivers/accel/ivpu/ivpu_mmu.c
+@@ -890,8 +890,7 @@ void ivpu_mmu_irq_evtq_handler(struct iv
+               REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+       }
+-      if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
+-              ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
++      queue_work(system_wq, &vdev->context_abort_work);
+ }
+ void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
+--- a/drivers/accel/ivpu/ivpu_sysfs.c
++++ b/drivers/accel/ivpu/ivpu_sysfs.c
+@@ -30,11 +30,12 @@ npu_busy_time_us_show(struct device *dev
+       struct ivpu_device *vdev = to_ivpu_device(drm);
+       ktime_t total, now = 0;
+-      xa_lock(&vdev->submitted_jobs_xa);
++      mutex_lock(&vdev->submitted_jobs_lock);
++
+       total = vdev->busy_time;
+       if (!xa_empty(&vdev->submitted_jobs_xa))
+               now = ktime_sub(ktime_get(), vdev->busy_start_ts);
+-      xa_unlock(&vdev->submitted_jobs_xa);
++      mutex_unlock(&vdev->submitted_jobs_lock);
+       return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
+ }
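
Taken together, the hunks above replace the xarray's internal spinlock with an explicit submitted_jobs_lock mutex and move context aborts from the IRQ thread into a work item. Below is a minimal sketch of that pattern in isolation, not the driver code itself: my_dev, my_job, my_abort_work_fn and MY_STATUS_ABORTED are placeholder names used only for illustration.

#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>

#define MY_STATUS_ABORTED 1     /* placeholder status code */

struct my_job { int placeholder; };

struct my_dev {
        struct mutex submitted_lock;    /* serializes users of submitted_xa */
        struct xarray submitted_xa;     /* job_id -> struct my_job */
        struct work_struct abort_work;
};

/* Caller holds submitted_lock, mirroring lockdep_assert_held() in the patch. */
static int my_job_signal_and_destroy(struct my_dev *dev, u32 job_id, u32 status)
{
        struct my_job *job;

        lockdep_assert_held(&dev->submitted_lock);

        job = xa_erase(&dev->submitted_xa, job_id);
        if (!job)
                return -ENOENT;
        /* signal the job's fence with @status and free its resources here */
        return 0;
}

/* Deferred abort: runs from system_wq instead of the IRQ thread. */
static void my_abort_work_fn(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, abort_work);
        struct my_job *job;
        unsigned long id;

        mutex_lock(&dev->submitted_lock);
        xa_for_each(&dev->submitted_xa, id, job)
                my_job_signal_and_destroy(dev, id, MY_STATUS_ABORTED);
        mutex_unlock(&dev->submitted_lock);
}

The device-init side would pair this with xa_init_flags(&dev->submitted_xa, XA_FLAGS_ALLOC1), a drmm_mutex_init()/mutex_init() on submitted_lock and INIT_WORK(&dev->abort_work, my_abort_work_fn), just as ivpu_dev_init() does in the hunk above.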
diff --git a/queue-6.14/accel-ivpu-add-handling-of-vpu_jsm_status_mvnci_context_violation_hw.patch b/queue-6.14/accel-ivpu-add-handling-of-vpu_jsm_status_mvnci_context_violation_hw.patch
new file mode 100644 (file)
index 0000000..38f2c47
--- /dev/null
@@ -0,0 +1,73 @@
+From dad945c27a42dfadddff1049cf5ae417209a8996 Mon Sep 17 00:00:00 2001
+From: Karol Wachowski <karol.wachowski@intel.com>
+Date: Tue, 7 Jan 2025 18:32:35 +0100
+Subject: accel/ivpu: Add handling of VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+commit dad945c27a42dfadddff1049cf5ae417209a8996 upstream.
+
+Mark as invalid the context of a job that returned a HW context violation
+error, and queue work that aborts jobs from the faulty context.
+Add an engine reset to the context abort thread handler, not only to abort
+currently executing jobs but also to recover the NPU from its invalid state.
+
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-13-maciej.falkowski@linux.intel.com
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_job.c |   25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -482,6 +482,26 @@ static int ivpu_job_signal_and_destroy(s
+       lockdep_assert_held(&vdev->submitted_jobs_lock);
++      job = xa_load(&vdev->submitted_jobs_xa, job_id);
++      if (!job)
++              return -ENOENT;
++
++      if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
++              guard(mutex)(&job->file_priv->lock);
++
++              if (job->file_priv->has_mmu_faults)
++                      return 0;
++
++              /*
++               * Mark context as faulty and defer destruction of the job to jobs abort thread
++               * handler to synchronize between both faults and jobs returning context violation
++               * status and ensure both are handled in the same way
++               */
++              job->file_priv->has_mmu_faults = true;
++              queue_work(system_wq, &vdev->context_abort_work);
++              return 0;
++      }
++
+       job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
+       if (!job)
+               return -ENOENT;
+@@ -793,6 +813,9 @@ void ivpu_context_abort_thread_handler(s
+       struct ivpu_job *job;
+       unsigned long id;
++      if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
++              ivpu_jsm_reset_engine(vdev, 0);
++
+       mutex_lock(&vdev->context_list_lock);
+       xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+               if (!file_priv->has_mmu_faults || file_priv->aborted)
+@@ -806,6 +829,8 @@ void ivpu_context_abort_thread_handler(s
+       if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+               return;
++
++      ivpu_jsm_hws_resume_engine(vdev, 0);
+       /*
+        * In hardware scheduling mode NPU already has stopped processing jobs
+        * and won't send us any further notifications, thus we have to free job related resources
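
The handling added here follows the same "mark and defer" idea: a completion that carries VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW does not reap the job directly; it flags the owning context and queues the abort worker, which then resets the engine and aborts every job from flagged contexts. A hedged sketch of that control flow, continuing the placeholder names from the sketch after the previous patch (my_ctx, MY_STATUS_CONTEXT_VIOLATION and the faulted flag are illustrative, not the driver's identifiers):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>

#define MY_STATUS_CONTEXT_VIOLATION 2   /* placeholder status code */

struct my_ctx {
        struct mutex lock;
        bool faulted;                   /* set once, consumed by the abort worker */
};

struct my_job { struct my_ctx *ctx; }; /* extends the earlier sketch */

/* Called with dev->submitted_lock held, as in the patch. */
static int my_job_complete(struct my_dev *dev, u32 job_id, u32 status)
{
        struct my_job *job;

        job = xa_load(&dev->submitted_xa, job_id);
        if (!job)
                return -ENOENT;

        if (status == MY_STATUS_CONTEXT_VIOLATION) {
                guard(mutex)(&job->ctx->lock);

                if (!job->ctx->faulted) {
                        /* Defer destruction so MMU faults and context-violation
                         * completions are handled by the same worker. */
                        job->ctx->faulted = true;
                        queue_work(system_wq, &dev->abort_work);
                }
                return 0;
        }

        /* Normal completion: erase from the xarray and signal the fence. */
        return my_job_signal_and_destroy(dev, job_id, status);
}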
diff --git a/queue-6.14/accel-ivpu-fix-locking-order-in-ivpu_job_submit.patch b/queue-6.14/accel-ivpu-fix-locking-order-in-ivpu_job_submit.patch
new file mode 100644 (file)
index 0000000..248e4c9
--- /dev/null
@@ -0,0 +1,105 @@
+From ab680dc6c78aa035e944ecc8c48a1caab9f39924 Mon Sep 17 00:00:00 2001
+From: Karol Wachowski <karol.wachowski@intel.com>
+Date: Tue, 7 Jan 2025 18:32:34 +0100
+Subject: accel/ivpu: Fix locking order in ivpu_job_submit
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+commit ab680dc6c78aa035e944ecc8c48a1caab9f39924 upstream.
+
+Fix deadlock in job submission and abort handling.
+When a thread aborts currently executing jobs due to a fault,
+it first locks the global lock protecting submitted_jobs (#1).
+
+After the last job is destroyed, it proceeds to release the related context
+and locks file_priv (#2). Meanwhile, in the job submission thread,
+the file_priv lock (#2) is taken first, and then the submitted_jobs
+lock (#1) is obtained when a job is added to the submitted jobs list.
+
+  CPU0                                       CPU1
+  ----                                       ----
+  (for example due to a fault)               (job submissions keep coming)
+
+  lock(&vdev->submitted_jobs_lock) #1
+  ivpu_jobs_abort_all()
+  job_destroy()
+                                             lock(&file_priv->lock)           #2
+                                             lock(&vdev->submitted_jobs_lock) #1
+  file_priv_release()
+  lock(&vdev->context_list_lock)
+  lock(&file_priv->lock)           #2
+
+This order of locking causes a deadlock. To resolve this issue,
+change the order of locking in ivpu_job_submit().
+
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-12-maciej.falkowski@linux.intel.com
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+[ This backport required small adjustments to ivpu_job_submit(),
+  which lacks support for explicit command queue creation added in 6.15.  ]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_job.c |   15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -532,6 +532,7 @@ static int ivpu_job_submit(struct ivpu_j
+       if (ret < 0)
+               return ret;
++      mutex_lock(&vdev->submitted_jobs_lock);
+       mutex_lock(&file_priv->lock);
+       cmdq = ivpu_cmdq_acquire(file_priv, priority);
+@@ -539,11 +540,9 @@ static int ivpu_job_submit(struct ivpu_j
+               ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
+                                     file_priv->ctx.id, job->engine_idx, priority);
+               ret = -EINVAL;
+-              goto err_unlock_file_priv;
++              goto err_unlock;
+       }
+-      mutex_lock(&vdev->submitted_jobs_lock);
+-
+       is_first_job = xa_empty(&vdev->submitted_jobs_xa);
+       ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+                             &file_priv->job_id_next, GFP_KERNEL);
+@@ -551,7 +550,7 @@ static int ivpu_job_submit(struct ivpu_j
+               ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+                        file_priv->ctx.id);
+               ret = -EBUSY;
+-              goto err_unlock_submitted_jobs;
++              goto err_unlock;
+       }
+       ret = ivpu_cmdq_push_job(cmdq, job);
+@@ -574,22 +573,20 @@ static int ivpu_job_submit(struct ivpu_j
+                job->job_id, file_priv->ctx.id, job->engine_idx, priority,
+                job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+-      mutex_unlock(&vdev->submitted_jobs_lock);
+       mutex_unlock(&file_priv->lock);
+       if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
+-              mutex_lock(&vdev->submitted_jobs_lock);
+               ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+-              mutex_unlock(&vdev->submitted_jobs_lock);
+       }
++      mutex_unlock(&vdev->submitted_jobs_lock);
++
+       return 0;
+ err_erase_xa:
+       xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+-err_unlock_submitted_jobs:
++err_unlock:
+       mutex_unlock(&vdev->submitted_jobs_lock);
+-err_unlock_file_priv:
+       mutex_unlock(&file_priv->lock);
+       ivpu_rpm_put(vdev);
+       return ret;
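
After this reordering, both paths agree on taking the global lock before the per-file lock. Condensed from the hunks above (error handling and helpers omitted), the submit path now nests as follows:

        mutex_lock(&vdev->submitted_jobs_lock); /* #1: global, taken first */
        mutex_lock(&file_priv->lock);           /* #2: per-file, taken second */

        /* acquire the cmdq, xa_alloc_cyclic() the job id, push the job ... */

        mutex_unlock(&file_priv->lock);
        /* IVPU_TEST_MODE_NULL_HW may complete the job here, still under #1 */
        mutex_unlock(&vdev->submitted_jobs_lock);

The abort path (lock #1 in ivpu_jobs_abort_all(), then #2 during context release) uses the same order, so the cycle shown in the commit message can no longer close.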
diff --git a/queue-6.14/drm-xe-invalidate-l3-read-only-cachelines-for-geometry-streams-too.patch b/queue-6.14/drm-xe-invalidate-l3-read-only-cachelines-for-geometry-streams-too.patch
new file mode 100644 (file)
index 0000000..36bf08e
--- /dev/null
@@ -0,0 +1,96 @@
+From e775278cd75f24a2758c28558c4e41b36c935740 Mon Sep 17 00:00:00 2001
+From: Kenneth Graunke <kenneth@whitecape.org>
+Date: Sun, 30 Mar 2025 12:59:23 -0400
+Subject: drm/xe: Invalidate L3 read-only cachelines for geometry streams too
+
+From: Kenneth Graunke <kenneth@whitecape.org>
+
+commit e775278cd75f24a2758c28558c4e41b36c935740 upstream.
+
+Historically, the Vertex Fetcher unit has not been an L3 client.  That
+meant that, when a buffer containing vertex data was written to, it was
+necessary to issue a PIPE_CONTROL::VF Cache Invalidate to invalidate any
+VF L2 cachelines associated with that buffer, so the new value would be
+properly read from memory.
+
+Since Tigerlake and later, VERTEX_BUFFER_STATE and 3DSTATE_INDEX_BUFFER
+have included an "L3 Bypass Enable" bit which userspace drivers can set
+to request that the vertex fetcher unit snoop L3.  However, unlike most
+true L3 clients, the "VF Cache Invalidate" bit continues to only
+invalidate the VF L2 cache - and not any associated L3 lines.
+
+To handle that, PIPE_CONTROL has a new "L3 Read Only Cache Invalidation
+Bit", which according to the docs, "controls the invalidation of the
+Geometry streams cached in L3 cache at the top of the pipe."  In other
+words, the vertex and index buffer data that gets cached in L3 when
+"L3 Bypass Disable" is set.
+
+Mesa always sets L3 Bypass Disable so that the VF unit snoops L3, and
+whenever it issues a VF Cache Invalidate, it also issues a L3 Read Only
+Cache Invalidate so that both L2 and L3 vertex data is invalidated.
+
+xe is issuing VF cache invalidates too (which handles cases like CPU
+writes to a buffer between GPU batches).  Because userspace may enable
+L3 snooping, it needs to issue an L3 Read Only Cache Invalidate as well.
+
+Fixes significant flickering in Firefox on Meteorlake, which was writing
+to vertex buffers via the CPU between batches; the missing L3 Read Only
+invalidates were causing the vertex fetcher to read stale data from L3.
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4460
+Fixes: 6ef3bb60557d ("drm/xe: enable lite restore")
+Cc: stable@vger.kernel.org # v6.13+
+Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
+Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Link: https://lore.kernel.org/r/20250330165923.56410-1-rodrigo.vivi@intel.com
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+(cherry picked from commit 61672806b579dd5a150a042ec9383be2bbc2ae7e)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/instructions/xe_gpu_commands.h |    1 +
+ drivers/gpu/drm/xe/xe_ring_ops.c                  |   13 +++++++++----
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
++++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
+@@ -41,6 +41,7 @@
+ #define GFX_OP_PIPE_CONTROL(len)      ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
++#define         PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE   BIT(10) /* gen12 */
+ #define         PIPE_CONTROL0_HDC_PIPELINE_FLUSH              BIT(9)  /* gen12 */
+ #define   PIPE_CONTROL_COMMAND_CACHE_INVALIDATE               (1<<29)
+--- a/drivers/gpu/drm/xe/xe_ring_ops.c
++++ b/drivers/gpu/drm/xe/xe_ring_ops.c
+@@ -141,7 +141,8 @@ emit_pipe_control(u32 *dw, int i, u32 bi
+ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
+                               int i)
+ {
+-      u32 flags = PIPE_CONTROL_CS_STALL |
++      u32 flags0 = 0;
++      u32 flags1 = PIPE_CONTROL_CS_STALL |
+               PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
+               PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
+               PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+@@ -152,11 +153,15 @@ static int emit_pipe_invalidate(u32 mask
+               PIPE_CONTROL_STORE_DATA_INDEX;
+       if (invalidate_tlb)
+-              flags |= PIPE_CONTROL_TLB_INVALIDATE;
++              flags1 |= PIPE_CONTROL_TLB_INVALIDATE;
+-      flags &= ~mask_flags;
++      flags1 &= ~mask_flags;
+-      return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_SCRATCH_ADDR, 0);
++      if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE)
++              flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE;
++
++      return emit_pipe_control(dw, i, flags0, flags1,
++                               LRC_PPHWSP_SCRATCH_ADDR, 0);
+ }
+ static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
diff --git a/queue-6.14/mm-slab-clean-up-slab-obj_exts-always.patch b/queue-6.14/mm-slab-clean-up-slab-obj_exts-always.patch
new file mode 100644 (file)
index 0000000..72df5a2
--- /dev/null
@@ -0,0 +1,95 @@
+From be8250786ca94952a19ce87f98ad9906448bc9ef Mon Sep 17 00:00:00 2001
+From: Zhenhua Huang <quic_zhenhuah@quicinc.com>
+Date: Mon, 21 Apr 2025 15:52:32 +0800
+Subject: mm, slab: clean up slab->obj_exts always
+
+From: Zhenhua Huang <quic_zhenhuah@quicinc.com>
+
+commit be8250786ca94952a19ce87f98ad9906448bc9ef upstream.
+
+When memory allocation profiling is disabled at runtime or due to an
+error, shutdown_mem_profiling() is called, but any slab->obj_exts that was
+previously allocated remains.
+It won't be cleared by unaccount_slab() because
+mem_alloc_profiling_enabled() is no longer true. This is incorrect:
+slab->obj_exts should always be cleaned up in unaccount_slab() to avoid
+the following error:
+
+[...]BUG: Bad page state in process...
+..
+[...]page dumped because: page still charged to cgroup
+
+[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
+Fixes: 21c690a349ba ("mm: introduce slabobj_ext to support slab object extensions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Acked-by: Harry Yoo <harry.yoo@oracle.com>
+Tested-by: Harry Yoo <harry.yoo@oracle.com>
+Acked-by: Suren Baghdasaryan <surenb@google.com>
+Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+[surenb: fixed trivial merge conflict in alloc_tagging_slab_alloc_hook(),
+skipped inlining free_slab_obj_exts() as it's already inline in 6.14]
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |   27 +++++++--------------------
+ 1 file changed, 7 insertions(+), 20 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2025,18 +2025,6 @@ static inline void free_slab_obj_exts(st
+       slab->obj_exts = 0;
+ }
+-static inline bool need_slab_obj_ext(void)
+-{
+-      if (mem_alloc_profiling_enabled())
+-              return true;
+-
+-      /*
+-       * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
+-       * inside memcg_slab_post_alloc_hook. No other users for now.
+-       */
+-      return false;
+-}
+-
+ #else /* CONFIG_SLAB_OBJ_EXT */
+ static inline void init_slab_obj_exts(struct slab *slab)
+@@ -2053,11 +2041,6 @@ static inline void free_slab_obj_exts(st
+ {
+ }
+-static inline bool need_slab_obj_ext(void)
+-{
+-      return false;
+-}
+-
+ #endif /* CONFIG_SLAB_OBJ_EXT */
+ #ifdef CONFIG_MEM_ALLOC_PROFILING
+@@ -2089,7 +2072,7 @@ prepare_slab_obj_exts_hook(struct kmem_c
+ static inline void
+ alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
+ {
+-      if (need_slab_obj_ext()) {
++      if (mem_alloc_profiling_enabled()) {
+               struct slabobj_ext *obj_exts;
+               obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
+@@ -2565,8 +2548,12 @@ static __always_inline void account_slab
+ static __always_inline void unaccount_slab(struct slab *slab, int order,
+                                          struct kmem_cache *s)
+ {
+-      if (memcg_kmem_online() || need_slab_obj_ext())
+-              free_slab_obj_exts(slab);
++      /*
++       * The slab object extensions should now be freed regardless of
++       * whether mem_alloc_profiling_enabled() or not because profiling
++       * might have been disabled after slab->obj_exts got allocated.
++       */
++      free_slab_obj_exts(slab);
+       mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
+                           -(PAGE_SIZE << order));
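
In short, the sequence this patch closes, followed by the resulting unaccount_slab() (condensed from the hunk above):

        /*
         * 1. Allocation profiling is enabled, so a slab gets slab->obj_exts
         *    allocated.
         * 2. Profiling is disabled at runtime (shutdown_mem_profiling()).
         * 3. The slab is later freed: previously unaccount_slab() skipped
         *    free_slab_obj_exts() because mem_alloc_profiling_enabled() was
         *    already false, so the page was freed with obj_exts still set,
         *    producing the "page still charged to cgroup" bad page state.
         */
        static __always_inline void unaccount_slab(struct slab *slab, int order,
                                                   struct kmem_cache *s)
        {
                /* Always free obj_exts; profiling may have been turned off after
                 * slab->obj_exts was allocated. */
                free_slab_obj_exts(slab);
                mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                                    -(PAGE_SIZE << order));
        }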
diff --git a/queue-6.14/series b/queue-6.14/series
index ca2a65c0cb3f351fee54bf92897bbdff61192a06..282b5511f477e305444bb1061e37ee517e52d93b 100644 (file)
--- a/queue-6.14/series
+++ b/queue-6.14/series
@@ -145,3 +145,8 @@ net-vertexcom-mse102x-fix-possible-stuck-of-spi-inte.patch
 net-vertexcom-mse102x-fix-len_mask.patch
 net-vertexcom-mse102x-add-range-check-for-cmd_rts.patch
 net-vertexcom-mse102x-fix-rx-error-handling.patch
+mm-slab-clean-up-slab-obj_exts-always.patch
+accel-ivpu-abort-all-jobs-after-command-queue-unregister.patch
+accel-ivpu-fix-locking-order-in-ivpu_job_submit.patch
+accel-ivpu-add-handling-of-vpu_jsm_status_mvnci_context_violation_hw.patch
+drm-xe-invalidate-l3-read-only-cachelines-for-geometry-streams-too.patch