drm/xe: Block exec and rebind worker while evicting for suspend / hibernate
Author:     Thomas Hellström <thomas.hellstrom@linux.intel.com>
AuthorDate: Thu, 4 Sep 2025 16:07:15 +0000 (18:07 +0200)
Commit:     Rodrigo Vivi <rodrigo.vivi@intel.com>
CommitDate: Tue, 9 Sep 2025 17:20:31 +0000 (13:20 -0400)
When the xe pm_notifier evicts for suspend / hibernate, racing tasks
might be trying to re-validate at the same time. This can lead to
suspend taking excessive time or getting stuck in a live-lock. This
behaviour becomes much worse with the fix that makes re-validation
actually bring BOs back to VRAM rather than letting them remain in TT.

Prevent that by having exec and the rebind worker wait for a completion
that the pm_notifier sets to block before suspend and signals after
resume / wakeup.
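
In isolation, the gating pattern looks roughly like this (a minimal
sketch with made-up names, not the driver's actual code):

  #include <linux/completion.h>

  static struct completion pm_gate;

  static void pm_gate_init(void)
  {
          init_completion(&pm_gate);
          complete_all(&pm_gate);         /* gate is open by default */
  }

  /* PM_SUSPEND_PREPARE / PM_HIBERNATION_PREPARE: close the gate. */
  static void pm_gate_close(void)
  {
          reinit_completion(&pm_gate);
  }

  /* PM_POST_SUSPEND / PM_POST_HIBERNATION: reopen, waking all waiters. */
  static void pm_gate_open(void)
  {
          complete_all(&pm_gate);
  }

  /*
   * Validating task: sleeps until the gate reopens; returns
   * -ERESTARTSYS if interrupted (e.g. by the freezer), in which
   * case the IOCTL is transparently rerun.
   */
  static int pm_gate_wait(void)
  {
          return wait_for_completion_interruptible(&pm_gate);
  }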

It's probably still possible to craft malicious applications that block
suspending. More work is pending to fix that.

v3:
- Avoid wait_for_completion() in the kernel worker since it could
  potentially cause work item flushes from freezable processes to
  wait forever. Instead terminate the rebind workers if needed and
  re-launch at resume (see the sketch after this changelog). (Matt Auld)
v4:
- Fix some bad naming and leftover debug printouts.
- Fix kerneldoc.
- Use drmm_mutex_init() for the xe->rebind_resume_lock (Matt Auld).
- Rework the interface of xe_vm_rebind_resume_worker (Matt Auld).
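
The v3 point about the kernel worker corresponds to a non-blocking
check along these lines (a sketch reusing pm_gate from above;
park_for_resume() is a hypothetical stand-in for the resume-list
handling the patch actually adds):

  static void rebind_work_fn(struct work_struct *w)
  {
          /*
           * Never sleep on the gate here: a freezable task flushing
           * this work item would then wait forever. Check the gate
           * non-blockingly instead.
           */
          if (!try_wait_for_completion(&pm_gate)) {
                  park_for_resume(w);     /* hypothetical: re-launched at resume */
                  return;
          }

          /* ... normal rebind processing ... */
  }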

Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4288
Fixes: c6a4d46ec1d7 ("drm/xe: evict user memory in PM notifier")
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: <stable@vger.kernel.org> # v6.16+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://lore.kernel.org/r/20250904160715.2613-4-thomas.hellstrom@linux.intel.com
(cherry picked from commit 599334572a5a99111015fbbd5152ce4dedc2f8b7)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_device_types.h
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_pm.c
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm.h
drivers/gpu/drm/xe/xe_vm_types.h

diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index d4d2c6854790cae8f4c2ebb2e3274d3d947b9516..7ceb0c90f3914cb078f0cba0285effa15a4e8de8 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -553,6 +553,12 @@ struct xe_device {
 
        /** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
        struct notifier_block pm_notifier;
+       /** @pm_block: Completion to block validating tasks on suspend / hibernate prepare */
+       struct completion pm_block;
+       /** @rebind_resume_list: List of wq items to kick on resume. */
+       struct list_head rebind_resume_list;
+       /** @rebind_resume_lock: Lock to protect the rebind_resume_list */
+       struct mutex rebind_resume_lock;
 
        /** @pmt: Support the PMT driver callback interface */
        struct {
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 44364c042ad72d620db12b01bf4c6ef944527c96..374c831e691b2b477218765aa8a07c59e088ec63 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -237,6 +237,15 @@ retry:
                goto err_unlock_list;
        }
 
+       /*
+        * It's OK to block interruptible here with the vm lock held, since
+        * on task freezing during suspend / hibernate, the call will
+        * return -ERESTARTSYS and the IOCTL will be rerun.
+        */
+       err = wait_for_completion_interruptible(&xe->pm_block);
+       if (err)
+               goto err_unlock_list;
+
        vm_exec.vm = &vm->gpuvm;
        vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
        if (xe_vm_in_lr_mode(vm)) {
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index c9d5f0a21c48a12554e238fac9a3067cc0f560a7..bb9b6ecad2afcda7f1b9e8c3d12afb1121d04142 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -24,6 +24,7 @@
 #include "xe_pcode.h"
 #include "xe_pxp.h"
 #include "xe_trace.h"
+#include "xe_vm.h"
 #include "xe_wa.h"
 
 /**
@@ -290,6 +291,19 @@ static u32 vram_threshold_value(struct xe_device *xe)
        return DEFAULT_VRAM_THRESHOLD;
 }
 
+static void xe_pm_wake_rebind_workers(struct xe_device *xe)
+{
+       struct xe_vm *vm, *next;
+
+       mutex_lock(&xe->rebind_resume_lock);
+       list_for_each_entry_safe(vm, next, &xe->rebind_resume_list,
+                                preempt.pm_activate_link) {
+               list_del_init(&vm->preempt.pm_activate_link);
+               xe_vm_resume_rebind_worker(vm);
+       }
+       mutex_unlock(&xe->rebind_resume_lock);
+}
+
 static int xe_pm_notifier_callback(struct notifier_block *nb,
                                   unsigned long action, void *data)
 {
@@ -299,6 +313,7 @@ static int xe_pm_notifier_callback(struct notifier_block *nb,
        switch (action) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
+               reinit_completion(&xe->pm_block);
                xe_pm_runtime_get(xe);
                err = xe_bo_evict_all_user(xe);
                if (err)
@@ -315,6 +330,8 @@ static int xe_pm_notifier_callback(struct notifier_block *nb,
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
+               complete_all(&xe->pm_block);
+               xe_pm_wake_rebind_workers(xe);
                xe_bo_notifier_unprepare_all_pinned(xe);
                xe_pm_runtime_put(xe);
                break;
@@ -341,6 +358,14 @@ int xe_pm_init(struct xe_device *xe)
        if (err)
                return err;
 
+       err = drmm_mutex_init(&xe->drm, &xe->rebind_resume_lock);
+       if (err)
+               goto err_unregister;
+
+       init_completion(&xe->pm_block);
+       complete_all(&xe->pm_block);
+       INIT_LIST_HEAD(&xe->rebind_resume_list);
+
        /* For now suspend/resume is only allowed with GuC */
        if (!xe_device_uc_enabled(xe))
                return 0;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d60c4b1153043c4b3c35df4897e058e31d6a8f5a..dc4f61e56579cb7b83fe0c7c99d492131c47567f 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -393,6 +393,9 @@ static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
                list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
                               &vm->rebind_list);
 
+       if (!try_wait_for_completion(&vm->xe->pm_block))
+               return -EAGAIN;
+
        ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
        if (ret)
                return ret;
@@ -479,6 +482,33 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
        return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
 }
 
+static bool vm_suspend_rebind_worker(struct xe_vm *vm)
+{
+       struct xe_device *xe = vm->xe;
+       bool ret = false;
+
+       mutex_lock(&xe->rebind_resume_lock);
+       if (!try_wait_for_completion(&vm->xe->pm_block)) {
+               ret = true;
+               list_move_tail(&vm->preempt.pm_activate_link, &xe->rebind_resume_list);
+       }
+       mutex_unlock(&xe->rebind_resume_lock);
+
+       return ret;
+}
+
+/**
+ * xe_vm_resume_rebind_worker() - Resume the rebind worker.
+ * @vm: The vm whose preempt worker to resume.
+ *
+ * Resume a preempt worker that was previously suspended by
+ * vm_suspend_rebind_worker().
+ */
+void xe_vm_resume_rebind_worker(struct xe_vm *vm)
+{
+       queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
+}
+
 static void preempt_rebind_work_func(struct work_struct *w)
 {
        struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
@@ -502,6 +532,11 @@ static void preempt_rebind_work_func(struct work_struct *w)
        }
 
 retry:
+       if (!try_wait_for_completion(&vm->xe->pm_block) && vm_suspend_rebind_worker(vm)) {
+               up_write(&vm->lock);
+               return;
+       }
+
        if (xe_vm_userptr_check_repin(vm)) {
                err = xe_vm_userptr_pin(vm);
                if (err)
@@ -1714,6 +1749,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
        if (flags & XE_VM_FLAG_LR_MODE) {
                INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
                xe_pm_runtime_get_noresume(xe);
+               INIT_LIST_HEAD(&vm->preempt.pm_activate_link);
        }
 
        if (flags & XE_VM_FLAG_FAULT_MODE) {
@@ -1895,8 +1931,12 @@ void xe_vm_close_and_put(struct xe_vm *vm)
        xe_assert(xe, !vm->preempt.num_exec_queues);
 
        xe_vm_close(vm);
-       if (xe_vm_in_preempt_fence_mode(vm))
+       if (xe_vm_in_preempt_fence_mode(vm)) {
+               mutex_lock(&xe->rebind_resume_lock);
+               list_del_init(&vm->preempt.pm_activate_link);
+               mutex_unlock(&xe->rebind_resume_lock);
                flush_work(&vm->preempt.rebind_work);
+       }
        if (xe_vm_in_fault_mode(vm))
                xe_svm_close(vm);
 
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 2ecb417c19a280c12bac8d2847a287fb6e8c454e..82b1127958071ffec584a8065828eb4c18bbd526 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -273,6 +273,8 @@ struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
                                       struct xe_exec_queue *q, u64 addr,
                                       enum xe_cache_level cache_lvl);
 
+void xe_vm_resume_rebind_worker(struct xe_vm *vm);
+
 /**
  * xe_vm_resv() - Return's the vm's reservation object
  * @vm: The vm
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 8a07feef503badc01ad657ab9b5089622d0371fc..6058cf739388bc07cbff2bf6e20d66a2a39fead9 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -293,6 +293,11 @@ struct xe_vm {
                 * BOs
                 */
                struct work_struct rebind_work;
+               /**
+                * @preempt.pm_activate_link: Link to list of rebind workers to be
+                * kicked on resume.
+                */
+               struct list_head pm_activate_link;
        } preempt;
 
        /** @um: unified memory state */
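
Taken together, the moving parts in the patch interact roughly as
follows (summary sketch, not driver code):

  /*
   * suspend prepare: reinit_completion(&xe->pm_block)
   *   - xe_exec: wait_for_completion_interruptible() blocks new execs
   *   - xe_gpuvm_validate: try_wait_for_completion() fails -> -EAGAIN
   *   - rebind worker: parks itself on xe->rebind_resume_list and exits
   *
   * resume: complete_all(&xe->pm_block)
   *   - xe_pm_wake_rebind_workers() walks rebind_resume_list and
   *     requeues each parked worker via xe_vm_resume_rebind_worker()
   */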