drm/xe: replace use of system_unbound_wq with system_dfl_wq
author     Marco Crivellari <marco.crivellari@suse.com>
           Mon, 2 Feb 2026 10:37:55 +0000 (11:37 +0100)
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>
           Tue, 3 Feb 2026 00:18:09 +0000 (19:18 -0500)
This patch continues the effort to refactor the workqueue APIs, which began
with the changes that introduced new workqueues and a new alloc_workqueue flag:

   commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
   commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")
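
As an illustration only (not part of this patch), a caller that wants to keep
per-CPU execution explicit once the default flips can allocate its own queue
with the new WQ_PERCPU flag; the queue name below is hypothetical:

   #include <linux/workqueue.h>

   /* Hypothetical per-CPU workqueue; "my_wq" is a made-up name. */
   struct workqueue_struct *wq = alloc_workqueue("my_wq", WQ_PERCPU, 0);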

The point of the refactoring is to eventually make workqueues unbound by
default, so that the scheduler can optimize their workload placement.

Before that can happen, workqueue users must be converted to the better-named
new workqueues, with no intended behaviour change:

   system_wq -> system_percpu_wq
   system_unbound_wq -> system_dfl_wq

This way, the obsolete workqueues (system_wq, system_unbound_wq) can be
removed in the future.
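
For illustration, the mechanical shape of the conversion is as follows (the
work item and its callback here are hypothetical, not taken from this series):

   #include <linux/workqueue.h>

   static void my_work_fn(struct work_struct *work)
   {
           /* Deferred, sleepable work; CPU placement is left to the scheduler. */
   }

   static DECLARE_WORK(my_work, my_work_fn);

   static void my_trigger(void)
   {
           /* Before: queue_work(system_unbound_wq, &my_work); */
           queue_work(system_dfl_wq, &my_work);
   }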

Link: https://lore.kernel.org/all/20250221112003.1dSuoGyc@linutronix.de/
Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patch.msgid.link/20260202103756.62138-2-marco.crivellari@suse.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_devcoredump.c
drivers/gpu/drm/xe/xe_execlist.c
drivers/gpu/drm/xe/xe_guc_ct.c
drivers/gpu/drm/xe/xe_oa.c
drivers/gpu/drm/xe/xe_vm.c

drivers/gpu/drm/xe/xe_devcoredump.c
index cf41bb6d21725576f21ed5ee0e5468c1a83cf675..558a1a9841a09c9cf3c71ef5609e144d30c0a3f8 100644
@@ -356,7 +356,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
 
        xe_engine_snapshot_capture_for_queue(q);
 
-       queue_work(system_unbound_wq, &ss->work);
+       queue_work(system_dfl_wq, &ss->work);
 
        dma_fence_end_signalling(cookie);
 }
drivers/gpu/drm/xe/xe_execlist.c
index 005a5b2c36fe8d5144e20ed0d18744a83cfc9ceb..dc25caf4781345e893e346f1adf49601e3810a3f 100644
@@ -421,7 +421,7 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
 static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
 {
        INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
-       queue_work(system_unbound_wq, &q->execlist->destroy_async);
+       queue_work(system_dfl_wq, &q->execlist->destroy_async);
 }
 
 static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_ct.c
index d4111124cd5f40a8cf6f7821fc2930fe932674ee..8a45573f88120701fbbbea63d78c89ca7af016e4 100644
@@ -644,7 +644,7 @@ static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
        spin_lock_irq(&ct->dead.lock);
        if (ct->dead.reason) {
                ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
-               queue_work(system_unbound_wq, &ct->dead.worker);
+               queue_work(system_dfl_wq, &ct->dead.worker);
        }
        spin_unlock_irq(&ct->dead.lock);
 #endif
@@ -2167,7 +2167,7 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso
 
        spin_unlock_irqrestore(&ct->dead.lock, flags);
 
-       queue_work(system_unbound_wq, &(ct)->dead.worker);
+       queue_work(system_dfl_wq, &(ct)->dead.worker);
 }
 
 static void ct_dead_print(struct xe_dead_ct *dead)
drivers/gpu/drm/xe/xe_oa.c
index abf87fe0b3450fbed7a8da7965ba04839f9526db..8b37e49f639fef6332622971e730b4dfcdd2eacf 100644
@@ -969,7 +969,7 @@ static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
        struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
 
        INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
-       queue_delayed_work(system_unbound_wq, &ofence->work,
+       queue_delayed_work(system_dfl_wq, &ofence->work,
                           usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
        dma_fence_put(fence);
 }
drivers/gpu/drm/xe/xe_vm.c
index 293b92ed2fddfcfaa189387658378f9339400042..e6cfa5dc7f62b946d35754385949a00393c30512 100644
@@ -1112,7 +1112,7 @@ static void vma_destroy_cb(struct dma_fence *fence,
        struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
 
        INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
-       queue_work(system_unbound_wq, &vma->destroy_work);
+       queue_work(system_dfl_wq, &vma->destroy_work);
 }
 
 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
@@ -1894,7 +1894,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
        struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
 
        /* To destroy the VM we need to be able to sleep */
-       queue_work(system_unbound_wq, &vm->destroy_work);
+       queue_work(system_dfl_wq, &vm->destroy_work);
 }
 
 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)