This patch continues the effort to refactor the workqueue APIs, which began
with the changes that introduced new workqueues and a new alloc_workqueue flag:
commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")
The goal of the refactoring is to eventually make workqueues unbound by
default, so that the scheduler can optimize their workload placement.
Before that can happen, workqueue users must be converted to the new,
better-named workqueues, with no intended behavior changes:
system_wq -> system_percpu_wq
system_unbound_wq -> system_dfl_wq
This way, the obsolete workqueues (system_wq, system_unbound_wq) can be
removed in the future.
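
For illustration, a minimal sketch of the conversion pattern applied
throughout this patch (the xe_example names below are hypothetical and
exist only for this example, not in the driver):

	#include <linux/workqueue.h>

	/* Hypothetical driver state, used only to illustrate the rename. */
	struct xe_example {
		struct work_struct work;
	};

	static void xe_example_work_fn(struct work_struct *work)
	{
		/* ... deferred processing that may sleep ... */
	}

	static void xe_example_schedule(struct xe_example *ex)
	{
		INIT_WORK(&ex->work, xe_example_work_fn);
		/* Before: queue_work(system_unbound_wq, &ex->work); */
		queue_work(system_dfl_wq, &ex->work);
	}

Only the workqueue argument changes; the work items, their callbacks, and
their queueing sites are otherwise untouched.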
Link: https://lore.kernel.org/all/20250221112003.1dSuoGyc@linutronix.de/
Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patch.msgid.link/20260202103756.62138-2-marco.crivellari@suse.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
xe_engine_snapshot_capture_for_queue(q);
- queue_work(system_unbound_wq, &ss->work);
+ queue_work(system_dfl_wq, &ss->work);
dma_fence_end_signalling(cookie);
}
static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
{
INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
- queue_work(system_unbound_wq, &q->execlist->destroy_async);
+ queue_work(system_dfl_wq, &q->execlist->destroy_async);
}
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
spin_lock_irq(&ct->dead.lock);
if (ct->dead.reason) {
ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
- queue_work(system_unbound_wq, &ct->dead.worker);
+ queue_work(system_dfl_wq, &ct->dead.worker);
}
spin_unlock_irq(&ct->dead.lock);
#endif
spin_unlock_irqrestore(&ct->dead.lock, flags);
- queue_work(system_unbound_wq, &(ct)->dead.worker);
+ queue_work(system_dfl_wq, &(ct)->dead.worker);
}
static void ct_dead_print(struct xe_dead_ct *dead)
struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
- queue_delayed_work(system_unbound_wq, &ofence->work,
+ queue_delayed_work(system_dfl_wq, &ofence->work,
usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
dma_fence_put(fence);
}
struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
- queue_work(system_unbound_wq, &vma->destroy_work);
+ queue_work(system_dfl_wq, &vma->destroy_work);
}
static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
/* To destroy the VM we need to be able to sleep */
- queue_work(system_unbound_wq, &vm->destroy_work);
+ queue_work(system_dfl_wq, &vm->destroy_work);
}
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)