drm/xe/vm: move rebind_work init earlier
author     Matthew Auld <matthew.auld@intel.com>
           Wed, 14 May 2025 15:24:25 +0000 (16:24 +0100)
committer  Thomas Hellström <thomas.hellstrom@linux.intel.com>
           Thu, 5 Jun 2025 16:05:10 +0000 (18:05 +0200)
In xe_vm_close_and_put() we need to be able to call
flush_work(rebind_work); however, during vm creation the error path can
reach this before the worker has actually been set up, leading to a
splat from flush_work().

It looks like we can simply move the worker init step earlier to fix
this.

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org> # v6.8+
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250514152424.149591-3-matthew.auld@intel.com
(cherry picked from commit 96af397aa1a2d1032a6e28ff3f4bc0ab4be40e1d)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
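
For illustration, a minimal, hypothetical sketch of the pattern being fixed (not
the actual xe driver code; my_obj, my_obj_create(), my_obj_destroy() and
my_setup_that_can_fail() are made-up names): calling flush_work() on a
work_struct that was never passed through INIT_WORK() can trigger a warning
(the "splat" above), so the worker has to be initialized before any failure
point whose error handling reuses the teardown path.

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	struct work_struct rebind_work;
};

static void my_rebind_work_func(struct work_struct *w)
{
	/* deferred rebind handling would live here */
}

/* Teardown path; also reused by the creation error path below. */
static void my_obj_destroy(struct my_obj *obj)
{
	/*
	 * Warns (e.g. via debug objects) if rebind_work was never run
	 * through INIT_WORK().
	 */
	flush_work(&obj->rebind_work);
	kfree(obj);
}

/* Stand-in for a later setup step that may fail. */
static int my_setup_that_can_fail(struct my_obj *obj)
{
	return 0;
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/*
	 * Initialize the worker before anything that can fail, so the
	 * error path (and the normal teardown) can always flush it.
	 */
	INIT_WORK(&obj->rebind_work, my_rebind_work_func);

	if (my_setup_that_can_fail(obj)) {
		my_obj_destroy(obj);	/* safe: worker already initialized */
		return NULL;
	}

	return obj;
}

In the real driver the equivalent step is moving the
INIT_WORK(&vm->preempt.rebind_work, ...) call ahead of the first allocation
that can fail in xe_vm_create(), as the diff below shows.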
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 79323c78130f3fee3a2806aab3817da61c8fbeea..a68fd99ddfdb9220520b8a51e562859356579fd8 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1678,8 +1678,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
         * scheduler drops all the references of it, hence protecting the VM
         * for this case is necessary.
         */
-       if (flags & XE_VM_FLAG_LR_MODE)
+       if (flags & XE_VM_FLAG_LR_MODE) {
+               INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
                xe_pm_runtime_get_noresume(xe);
+       }
 
        vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
        if (!vm_resv_obj) {
@@ -1724,10 +1726,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
                vm->batch_invalidate_tlb = true;
        }
 
-       if (vm->flags & XE_VM_FLAG_LR_MODE) {
-               INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+       if (vm->flags & XE_VM_FLAG_LR_MODE)
                vm->batch_invalidate_tlb = false;
-       }
 
        /* Fill pt_root after allocating scratch tables */
        for_each_tile(tile, xe, id) {