--- /dev/null
+From 91274fd4ed9ba110b02c53d71d2778b7d13b49ac Mon Sep 17 00:00:00 2001
+From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Date: Wed, 28 May 2025 19:12:20 +0200
+Subject: accel/ivpu: Fix warning in ivpu_gem_bo_free()
+
+From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+
+commit 91274fd4ed9ba110b02c53d71d2778b7d13b49ac upstream.
+
+Don't WARN if imported buffers are still in use in ivpu_gem_bo_free(), as
+they can indeed still be in use by the original context/driver.
+
+Fixes: 647371a6609d ("accel/ivpu: Add GEM buffer object management")
+Cc: stable@vger.kernel.org # v6.3
+Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
+Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://lore.kernel.org/r/20250528171220.513225-1-jacek.lawrynowicz@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_gem.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/accel/ivpu/ivpu_gem.c
++++ b/drivers/accel/ivpu/ivpu_gem.c
+@@ -285,7 +285,8 @@ static void ivpu_gem_bo_free(struct drm_
+ list_del(&bo->bo_list_node);
+ mutex_unlock(&vdev->bo_list_lock);
+
+- drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
++ drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
++ !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+ drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
+ drm_WARN_ON(&vdev->drm, bo->base.vaddr);
+
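Outside the patch context, the new guard boils down to "only assert idleness for buffers this driver allocated itself". A minimal userspace sketch of that condition, with hypothetical stand-ins for drm_gem_is_imported() and dma_resv_test_signaled():

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the DRM helpers used by the patch. */
    struct bo { bool imported; bool fences_signaled; };

    static void check_idle_on_free(const struct bo *bo)
    {
            /* Warn only for locally allocated buffers: an imported BO's
             * reservation object belongs to the exporter and may still
             * legitimately carry unsignaled fences. */
            if (!bo->imported && !bo->fences_signaled)
                    fprintf(stderr, "WARN: freeing busy BO\n");
    }

    int main(void)
    {
            struct bo local = { .imported = false, .fences_signaled = false };
            struct bo imported = { .imported = true, .fences_signaled = false };

            check_idle_on_free(&local);    /* warns */
            check_idle_on_free(&imported); /* silent, as intended by the fix */
            return 0;
    }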
--- /dev/null
+From a01e93ee44f7ed76f872d0ede82f8d31bf0a048a Mon Sep 17 00:00:00 2001
+From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Date: Tue, 6 May 2025 11:13:03 +0200
+Subject: accel/ivpu: Improve buffer object logging
+
+From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+
+commit a01e93ee44f7ed76f872d0ede82f8d31bf0a048a upstream.
+
+- Fix the missing alloc log when drm_gem_handle_create() fails in
+  drm_vma_node_allow() and the open callback is not called
+- Add ivpu_bo->ctx_id, which enables logging the actual context
+  id instead of defaulting to 0
+- Add a couple of WARNs and error messages so that more memory
+  corruption issues can be caught
+
+Fixes: 37dee2a2f433 ("accel/ivpu: Improve buffer object debug logs")
+Cc: stable@vger.kernel.org # v6.8+
+Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
+Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://lore.kernel.org/r/20250506091303.262034-1-jacek.lawrynowicz@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_gem.c | 25 +++++++++++++++++--------
+ drivers/accel/ivpu/ivpu_gem.h | 1 +
+ 2 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/drivers/accel/ivpu/ivpu_gem.c
++++ b/drivers/accel/ivpu/ivpu_gem.c
+@@ -28,7 +28,7 @@ static inline void ivpu_dbg_bo(struct iv
+ {
+ ivpu_dbg(vdev, BO,
+ "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
+- action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
++ action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
+ (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
+ (bool)bo->base.base.import_attach);
+ }
+@@ -94,8 +94,6 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *b
+ ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
+ }
+
+- ivpu_dbg_bo(vdev, bo, "alloc");
+-
+ mutex_unlock(&bo->lock);
+
+ drm_dev_exit(idx);
+@@ -215,7 +213,7 @@ fail_detach:
+ return ERR_PTR(ret);
+ }
+
+-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
++static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
+ {
+ struct drm_gem_shmem_object *shmem;
+ struct ivpu_bo *bo;
+@@ -233,6 +231,7 @@ static struct ivpu_bo *ivpu_bo_alloc(str
+ return ERR_CAST(shmem);
+
+ bo = to_ivpu_bo(&shmem->base);
++ bo->ctx_id = ctx_id;
+ bo->base.map_wc = flags & DRM_IVPU_BO_WC;
+ bo->flags = flags;
+
+@@ -240,6 +239,8 @@ static struct ivpu_bo *ivpu_bo_alloc(str
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
+
++ ivpu_dbg_bo(vdev, bo, "alloc");
++
+ return bo;
+ }
+
+@@ -278,8 +279,13 @@ static void ivpu_gem_bo_free(struct drm_
+ mutex_unlock(&vdev->bo_list_lock);
+
+ drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
++ drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
++ drm_WARN_ON(&vdev->drm, bo->base.vaddr);
+
+ ivpu_bo_unbind_locked(bo);
++ drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
++ drm_WARN_ON(&vdev->drm, bo->ctx);
++
+ mutex_destroy(&bo->lock);
+
+ drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+@@ -314,7 +320,7 @@ int ivpu_bo_create_ioctl(struct drm_devi
+ if (size == 0)
+ return -EINVAL;
+
+- bo = ivpu_bo_alloc(vdev, size, args->flags);
++ bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
+ if (IS_ERR(bo)) {
+ ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
+ bo, file_priv->ctx.id, args->size, args->flags);
+@@ -322,7 +328,10 @@ int ivpu_bo_create_ioctl(struct drm_devi
+ }
+
+ ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+- if (!ret)
++ if (ret)
++ ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
++ bo, file_priv->ctx.id, args->size, args->flags);
++ else
+ args->vpu_addr = bo->vpu_addr;
+
+ drm_gem_object_put(&bo->base.base);
+@@ -345,7 +354,7 @@ ivpu_bo_create(struct ivpu_device *vdev,
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
+
+- bo = ivpu_bo_alloc(vdev, size, flags);
++ bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+ if (IS_ERR(bo)) {
+ ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ bo, range->start, size, flags);
+@@ -452,7 +461,7 @@ static void ivpu_bo_print_info(struct iv
+ mutex_lock(&bo->lock);
+
+ drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+- bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
++ bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
+ bo->flags, kref_read(&bo->base.base.refcount));
+
+ if (bo->base.pages)
+--- a/drivers/accel/ivpu/ivpu_gem.h
++++ b/drivers/accel/ivpu/ivpu_gem.h
+@@ -21,6 +21,7 @@ struct ivpu_bo {
+ u64 vpu_addr;
+ u32 flags;
+ u32 job_status; /* Valid only for command buffer */
++ u32 ctx_id;
+ bool mmu_mapped;
+ };
+
--- /dev/null
+From a47e36dc5d90dc664cac87304c17d50f1595d634 Mon Sep 17 00:00:00 2001
+From: Karol Wachowski <karol.wachowski@intel.com>
+Date: Wed, 28 May 2025 17:42:53 +0200
+Subject: accel/ivpu: Trigger device recovery on engine reset/resume failure
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+commit a47e36dc5d90dc664cac87304c17d50f1595d634 upstream.
+
+Trigger full device recovery when the driver fails to restore device state
+via engine reset and resume operations. This is necessary because, even if
+submissions from a faulty context are blocked, the NPU may still process
+previously submitted faulty jobs if the engine reset fails to abort them.
+Such jobs can continue to generate faults and occupy device resources.
+When engine reset is ineffective, the only way to recover is to perform
+a full device recovery.
+
+Fixes: dad945c27a42 ("accel/ivpu: Add handling of VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW")
+Cc: stable@vger.kernel.org # v6.15+
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://lore.kernel.org/r/20250528154253.500556-1-jacek.lawrynowicz@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_job.c | 6 ++++--
+ drivers/accel/ivpu/ivpu_jsm_msg.c | 9 +++++++--
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -986,7 +986,8 @@ void ivpu_context_abort_work_fn(struct w
+ return;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+- ivpu_jsm_reset_engine(vdev, 0);
++ if (ivpu_jsm_reset_engine(vdev, 0))
++ return;
+
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+@@ -1009,7 +1010,8 @@ void ivpu_context_abort_work_fn(struct w
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ goto runtime_put;
+
+- ivpu_jsm_hws_resume_engine(vdev, 0);
++ if (ivpu_jsm_hws_resume_engine(vdev, 0))
++ return;
+ /*
+ * In hardware scheduling mode NPU already has stopped processing jobs
+ * and won't send us any further notifications, thus we have to free job related resources
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
+@@ -7,6 +7,7 @@
+ #include "ivpu_hw.h"
+ #include "ivpu_ipc.h"
+ #include "ivpu_jsm_msg.h"
++#include "ivpu_pm.h"
+ #include "vpu_jsm_api.h"
+
+ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
+@@ -163,8 +164,10 @@ int ivpu_jsm_reset_engine(struct ivpu_de
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+- if (ret)
++ if (ret) {
+ ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
++ ivpu_pm_trigger_recovery(vdev, "Engine reset failed");
++ }
+
+ return ret;
+ }
+@@ -354,8 +357,10 @@ int ivpu_jsm_hws_resume_engine(struct iv
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+- if (ret)
++ if (ret) {
+ ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
++ ivpu_pm_trigger_recovery(vdev, "Engine resume failed");
++ }
+
+ return ret;
+ }
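As a rough standalone illustration (not the driver's actual control flow) of the escalation this patch introduces: once the targeted engine reset fails, the caller bails out and full device recovery is expected to take over, instead of continuing per-context cleanup that the hardware may no longer honour.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical model of the escalation path. */
    static bool engine_reset(bool hw_ok)
    {
            if (!hw_ok) {
                    fprintf(stderr, "engine reset failed, scheduling full recovery\n");
                    return false;
            }
            return true;
    }

    static void abort_faulty_context(bool hw_ok)
    {
            if (!engine_reset(hw_ok))
                    return; /* recovery owns the device state from here on */
            printf("engine reset ok, aborting jobs of the faulty context\n");
    }

    int main(void)
    {
            abort_faulty_context(true);
            abort_faulty_context(false);
            return 0;
    }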
--- /dev/null
+From 98d3f772ca7d6822bdfc8c960f5f909574db97c9 Mon Sep 17 00:00:00 2001
+From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Date: Wed, 28 May 2025 17:43:25 +0200
+Subject: accel/ivpu: Use dma_resv_lock() instead of a custom mutex
+
+From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+
+commit 98d3f772ca7d6822bdfc8c960f5f909574db97c9 upstream.
+
+This fixes potential race conditions in:
+ - ivpu_bo_unbind_locked() where we modified the shmem->sgt without
+ holding the dma_resv_lock().
+ - ivpu_bo_print_info() where we read the shmem->pages without
+ holding the dma_resv_lock().
+
+Using dma_resv_lock() also protects against future synchronisation
+issues that may arise when accessing drm_gem_shmem_object or
+drm_gem_object members.
+
+Fixes: 42328003ecb6 ("accel/ivpu: Refactor BO creation functions")
+Cc: stable@vger.kernel.org # v6.9+
+Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://lore.kernel.org/r/20250528154325.500684-1-jacek.lawrynowicz@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_gem.c | 63 ++++++++++++++++++++++--------------------
+ drivers/accel/ivpu/ivpu_gem.h | 1
+ 2 files changed, 34 insertions(+), 30 deletions(-)
+
+--- a/drivers/accel/ivpu/ivpu_gem.c
++++ b/drivers/accel/ivpu/ivpu_gem.c
+@@ -33,6 +33,16 @@ static inline void ivpu_dbg_bo(struct iv
+ (bool)bo->base.base.import_attach);
+ }
+
++static inline int ivpu_bo_lock(struct ivpu_bo *bo)
++{
++ return dma_resv_lock(bo->base.base.resv, NULL);
++}
++
++static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
++{
++ dma_resv_unlock(bo->base.base.resv);
++}
++
+ /*
+ * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
+ *
+@@ -43,22 +53,22 @@ static inline void ivpu_dbg_bo(struct iv
+ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+ {
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
++ struct sg_table *sgt;
+ int ret = 0;
+
+- mutex_lock(&bo->lock);
+-
+ ivpu_dbg_bo(vdev, bo, "pin");
+- drm_WARN_ON(&vdev->drm, !bo->ctx);
+
+- if (!bo->mmu_mapped) {
+- struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
++ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
++ if (IS_ERR(sgt)) {
++ ret = PTR_ERR(sgt);
++ ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
++ return ret;
++ }
+
+- if (IS_ERR(sgt)) {
+- ret = PTR_ERR(sgt);
+- ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+- goto unlock;
+- }
++ ivpu_bo_lock(bo);
+
++ if (!bo->mmu_mapped) {
++ drm_WARN_ON(&vdev->drm, !bo->ctx);
+ ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
+ ivpu_bo_is_snooped(bo));
+ if (ret) {
+@@ -69,7 +79,7 @@ int __must_check ivpu_bo_pin(struct ivpu
+ }
+
+ unlock:
+- mutex_unlock(&bo->lock);
++ ivpu_bo_unlock(bo);
+
+ return ret;
+ }
+@@ -84,7 +94,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *b
+ if (!drm_dev_enter(&vdev->drm, &idx))
+ return -ENODEV;
+
+- mutex_lock(&bo->lock);
++ ivpu_bo_lock(bo);
+
+ ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
+ if (!ret) {
+@@ -94,7 +104,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *b
+ ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
+ }
+
+- mutex_unlock(&bo->lock);
++ ivpu_bo_unlock(bo);
+
+ drm_dev_exit(idx);
+
+@@ -105,7 +115,7 @@ static void ivpu_bo_unbind_locked(struct
+ {
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+
+- lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
++ lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
+
+ if (bo->mmu_mapped) {
+ drm_WARN_ON(&vdev->drm, !bo->ctx);
+@@ -123,14 +133,12 @@ static void ivpu_bo_unbind_locked(struct
+ if (bo->base.base.import_attach)
+ return;
+
+- dma_resv_lock(bo->base.base.resv, NULL);
+ if (bo->base.sgt) {
+ dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ kfree(bo->base.sgt);
+ bo->base.sgt = NULL;
+ }
+- dma_resv_unlock(bo->base.base.resv);
+ }
+
+ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+@@ -142,12 +150,12 @@ void ivpu_bo_unbind_all_bos_from_context
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
+- mutex_lock(&bo->lock);
++ ivpu_bo_lock(bo);
+ if (bo->ctx == ctx) {
+ ivpu_dbg_bo(vdev, bo, "unbind");
+ ivpu_bo_unbind_locked(bo);
+ }
+- mutex_unlock(&bo->lock);
++ ivpu_bo_unlock(bo);
+ }
+ mutex_unlock(&vdev->bo_list_lock);
+ }
+@@ -167,7 +175,6 @@ struct drm_gem_object *ivpu_gem_create_o
+ bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
+
+ INIT_LIST_HEAD(&bo->bo_list_node);
+- mutex_init(&bo->lock);
+
+ return &bo->base.base;
+ }
+@@ -286,8 +293,6 @@ static void ivpu_gem_bo_free(struct drm_
+ drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
+ drm_WARN_ON(&vdev->drm, bo->ctx);
+
+- mutex_destroy(&bo->lock);
+-
+ drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ drm_gem_shmem_free(&bo->base);
+ }
+@@ -370,9 +375,9 @@ ivpu_bo_create(struct ivpu_device *vdev,
+ goto err_put;
+
+ if (flags & DRM_IVPU_BO_MAPPABLE) {
+- dma_resv_lock(bo->base.base.resv, NULL);
++ ivpu_bo_lock(bo);
+ ret = drm_gem_shmem_vmap(&bo->base, &map);
+- dma_resv_unlock(bo->base.base.resv);
++ ivpu_bo_unlock(bo);
+
+ if (ret)
+ goto err_put;
+@@ -395,9 +400,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
+
+ if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
+- dma_resv_lock(bo->base.base.resv, NULL);
++ ivpu_bo_lock(bo);
+ drm_gem_shmem_vunmap(&bo->base, &map);
+- dma_resv_unlock(bo->base.base.resv);
++ ivpu_bo_unlock(bo);
+ }
+
+ drm_gem_object_put(&bo->base.base);
+@@ -416,12 +421,12 @@ int ivpu_bo_info_ioctl(struct drm_device
+
+ bo = to_ivpu_bo(obj);
+
+- mutex_lock(&bo->lock);
++ ivpu_bo_lock(bo);
+ args->flags = bo->flags;
+ args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
+ args->vpu_addr = bo->vpu_addr;
+ args->size = obj->size;
+- mutex_unlock(&bo->lock);
++ ivpu_bo_unlock(bo);
+
+ drm_gem_object_put(obj);
+ return ret;
+@@ -458,7 +463,7 @@ int ivpu_bo_wait_ioctl(struct drm_device
+
+ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
+ {
+- mutex_lock(&bo->lock);
++ ivpu_bo_lock(bo);
+
+ drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+ bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
+@@ -475,7 +480,7 @@ static void ivpu_bo_print_info(struct iv
+
+ drm_printf(p, "\n");
+
+- mutex_unlock(&bo->lock);
++ ivpu_bo_unlock(bo);
+ }
+
+ void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
+--- a/drivers/accel/ivpu/ivpu_gem.h
++++ b/drivers/accel/ivpu/ivpu_gem.h
+@@ -17,7 +17,6 @@ struct ivpu_bo {
+ struct list_head bo_list_node;
+ struct drm_mm_node mm_node;
+
+- struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
+ u64 vpu_addr;
+ u32 flags;
+ u32 job_status; /* Valid only for command buffer */
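A compilable userspace analogy of the design choice, with a pthread mutex standing in for the dma_resv lock: the lock is embedded in the shared base object, so every path that touches shared BO state serialises on that single lock instead of a private per-wrapper mutex.

    #include <pthread.h>
    #include <stdio.h>

    /* Analogy only: the "resv" mutex plays the role of the dma_resv lock
     * embedded in the GEM base object. */
    struct base_obj {
            pthread_mutex_t resv;   /* shared reservation-style lock */
            int pages_mapped;
    };

    struct bo {
            struct base_obj *base;
            unsigned long vpu_addr;
    };

    static void bo_lock(struct bo *bo)   { pthread_mutex_lock(&bo->base->resv); }
    static void bo_unlock(struct bo *bo) { pthread_mutex_unlock(&bo->base->resv); }

    static void bo_unbind(struct bo *bo)
    {
            bo_lock(bo);
            bo->base->pages_mapped = 0; /* shared state touched under the shared lock */
            bo->vpu_addr = 0;
            bo_unlock(bo);
    }

    int main(void)
    {
            struct base_obj base = { .pages_mapped = 1 };
            struct bo bo = { &base, 0x1000 };

            pthread_mutex_init(&base.resv, NULL);
            bo_unbind(&bo);
            printf("pages_mapped=%d vpu_addr=%#lx\n", base.pages_mapped, bo.vpu_addr);
            pthread_mutex_destroy(&base.resv);
            return 0;
    }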
--- /dev/null
+From 1c2c0e29f24360b3130c005a3c261cb8c7b363c6 Mon Sep 17 00:00:00 2001
+From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Date: Tue, 6 May 2025 11:20:30 +0200
+Subject: accel/ivpu: Use firmware names from upstream repo
+
+From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+
+commit 1c2c0e29f24360b3130c005a3c261cb8c7b363c6 upstream.
+
+Use FW names from the linux-firmware repo instead of the deprecated ones.
+The vpu_37xx.bin style names were never released and were only used for
+internal testing, so it is safe to remove them.
+
+Fixes: c140244f0cfb ("accel/ivpu: Add initial Panther Lake support")
+Cc: stable@vger.kernel.org # v6.13+
+Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
+Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://lore.kernel.org/r/20250506092030.280276-1-jacek.lawrynowicz@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_fw.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/accel/ivpu/ivpu_fw.c
++++ b/drivers/accel/ivpu/ivpu_fw.c
+@@ -55,18 +55,18 @@ static struct {
+ int gen;
+ const char *name;
+ } fw_names[] = {
+- { IVPU_HW_IP_37XX, "vpu_37xx.bin" },
++ { IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v1.bin" },
+ { IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
+- { IVPU_HW_IP_40XX, "vpu_40xx.bin" },
++ { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v1.bin" },
+ { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+- { IVPU_HW_IP_50XX, "vpu_50xx.bin" },
++ { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" },
+ { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
+ };
+
+ /* Production fw_names from the table above */
+-MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
+-MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
+-MODULE_FIRMWARE("intel/vpu/vpu_50xx_v0.0.bin");
++MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin");
++MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin");
++MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin");
+
+ static int ivpu_fw_request(struct ivpu_device *vdev)
+ {
--- /dev/null
+From 16038474e3a0263572f36326ef85057aaf341814 Mon Sep 17 00:00:00 2001
+From: Sean Nyekjaer <sean@geanix.com>
+Date: Mon, 5 May 2025 21:20:07 +0200
+Subject: iio: accel: fxls8962af: Fix temperature calculation
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+commit 16038474e3a0263572f36326ef85057aaf341814 upstream.
+
+According to the spec, temperature should be returned in milli degrees Celsius.
+Add in_temp_scale to convert from degrees Celsius to milli degrees Celsius.
+
+Fixes: a3e0b51884ee ("iio: accel: add support for FXLS8962AF/FXLS8964AF accelerometers")
+Cc: stable@vger.kernel.org
+Reviewed-by: Marcelo Schmitt <marcelo.schmitt1@gmail.com>
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250505-fxls-v4-1-a38652e21738@geanix.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/accel/fxls8962af-core.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/accel/fxls8962af-core.c
++++ b/drivers/iio/accel/fxls8962af-core.c
+@@ -23,6 +23,7 @@
+ #include <linux/regulator/consumer.h>
+ #include <linux/regmap.h>
+ #include <linux/types.h>
++#include <linux/units.h>
+
+ #include <linux/iio/buffer.h>
+ #include <linux/iio/events.h>
+@@ -439,8 +440,16 @@ static int fxls8962af_read_raw(struct ii
+ *val = FXLS8962AF_TEMP_CENTER_VAL;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+- *val = 0;
+- return fxls8962af_read_full_scale(data, val2);
++ switch (chan->type) {
++ case IIO_TEMP:
++ *val = MILLIDEGREE_PER_DEGREE;
++ return IIO_VAL_INT;
++ case IIO_ACCEL:
++ *val = 0;
++ return fxls8962af_read_full_scale(data, val2);
++ default:
++ return -EINVAL;
++ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return fxls8962af_read_samp_freq(data, val, val2);
+ default:
+@@ -736,6 +745,7 @@ static const struct iio_event_spec fxls8
+ .type = IIO_TEMP, \
+ .address = FXLS8962AF_TEMP_OUT, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
++ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET),\
+ .scan_index = -1, \
+ .scan_type = { \
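A quick standalone check of the resulting arithmetic, using the usual IIO convention value = (raw + offset) * scale and assuming the driver's 25 degree center offset: with a scale of 1000 the reported temperature ends up in milli degrees Celsius.

    #include <stdio.h>

    #define MILLIDEGREE_PER_DEGREE  1000
    #define TEMP_CENTER_VAL         25      /* assumed driver offset, in deg C */

    /* IIO convention: processed = (raw + offset) * scale. */
    static int temp_millicelsius(int raw)
    {
            return (raw + TEMP_CENTER_VAL) * MILLIDEGREE_PER_DEGREE;
    }

    int main(void)
    {
            printf("raw -3 -> %d m°C\n", temp_millicelsius(-3)); /* 22000 */
            printf("raw  0 -> %d m°C\n", temp_millicelsius(0));  /* 25000 */
            return 0;
    }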
--- /dev/null
+From 9c78317b42e7c32523c91099859bc4721e9f75dd Mon Sep 17 00:00:00 2001
+From: Sean Nyekjaer <sean@geanix.com>
+Date: Mon, 5 May 2025 21:20:08 +0200
+Subject: iio: accel: fxls8962af: Fix temperature scan element sign
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+commit 9c78317b42e7c32523c91099859bc4721e9f75dd upstream.
+
+Mark the temperature scan element as signed; the data read from the TEMP_OUT
+register is in two's complement format.
+This avoids the temperature being mishandled and incorrectly displayed.
+
+Fixes: a3e0b51884ee ("iio: accel: add support for FXLS8962AF/FXLS8964AF accelerometers")
+Suggested-by: Marcelo Schmitt <marcelo.schmitt1@gmail.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Marcelo Schmitt <marcelo.schmitt1@gmail.com>
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250505-fxls-v4-2-a38652e21738@geanix.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/accel/fxls8962af-core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/iio/accel/fxls8962af-core.c
++++ b/drivers/iio/accel/fxls8962af-core.c
+@@ -739,6 +739,7 @@ static const struct iio_event_spec fxls8
+ BIT(IIO_CHAN_INFO_OFFSET),\
+ .scan_index = -1, \
+ .scan_type = { \
++ .sign = 's', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
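A minimal demonstration of why the sign flag matters: the same TEMP_OUT byte means -3 when interpreted as two's complement but 253 when treated as unsigned.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t reg = 0xFD; /* example TEMP_OUT reading */

            /* Without .sign = 's', userspace treats the byte as unsigned. */
            printf("unsigned: %u\n", (unsigned)reg);  /* 253 */
            printf("signed:   %d\n", (int8_t)reg);    /* -3  */
            return 0;
    }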
pci-apple-set-only-available-ports-up.patch
pci-dw-rockchip-fix-phy-function-call-sequence-in-rockchip_pcie_phy_deinit.patch
hv_netvsc-fix-potential-deadlock-in-netvsc_vf_setxdp.patch
+iio-accel-fxls8962af-fix-temperature-scan-element-sign.patch
+iio-accel-fxls8962af-fix-temperature-calculation.patch
+accel-ivpu-improve-buffer-object-logging.patch
+accel-ivpu-use-firmware-names-from-upstream-repo.patch
+accel-ivpu-trigger-device-recovery-on-engine-reset-resume-failure.patch
+accel-ivpu-use-dma_resv_lock-instead-of-a-custom-mutex.patch
+accel-ivpu-fix-warning-in-ivpu_gem_bo_free.patch