]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/i915: replace use of system_unbound_wq with system_dfl_wq
authorMarco Crivellari <marco.crivellari@suse.com>
Tue, 4 Nov 2025 10:00:30 +0000 (11:00 +0100)
committerRodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 5 Mar 2026 18:58:22 +0000 (13:58 -0500)
Currently, if a user enqueues a work item using schedule_delayed_work(), the
wq used is "system_wq" (a per-cpu wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
schedule_work(), which uses system_wq, and to queue_work(), which again
makes use of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue so as not to enforce
locality constraints for random work whenever it's not required.

This patch continues the effort to refactor workqueue APIs, which began
with the change introducing new workqueues:

commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")

The old system_unbound_wq will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Reviewed-by: Krzysztof Karas <krzysztof.karas@intel.com>
Link: https://patch.msgid.link/20251104100032.61525-2-marco.crivellari@suse.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_tc.c
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
drivers/gpu/drm/i915/gt/uc/intel_guc.c
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_sw_fence_work.c
drivers/gpu/drm/i915/i915_vma_resource.c
drivers/gpu/drm/i915/pxp/intel_pxp.c
drivers/gpu/drm/i915/pxp/intel_pxp_irq.c

index 755935dcfe230e3b8237945c9bb60a82177f3e66..c678e06118cc772da0a6695bc5e19fc6b95a7a27 100644 (file)
@@ -645,7 +645,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
                                                     power.domains);
        drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
        power_domains->async_put_wakeref = wakeref;
-       drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
+       drm_WARN_ON(display->drm, !queue_delayed_work(system_dfl_wq,
                                                      &power_domains->async_put_work,
                                                      msecs_to_jiffies(delay_ms)));
 }
index 9d321509986d3a985f0d7065b4cfe55e2e95cc03..a21dd4e3fe4caab814c6d290fcc5d2e84e804195 100644 (file)
@@ -1844,7 +1844,7 @@ bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
        if (!intel_tc_port_link_needs_reset(dig_port))
                return false;
 
-       queue_delayed_work(system_unbound_wq,
+       queue_delayed_work(system_dfl_wq,
                           &to_tc_port(dig_port)->link_reset_work,
                           msecs_to_jiffies(2000));
 
@@ -1925,7 +1925,7 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port)
        struct intel_tc_port *tc = to_tc_port(dig_port);
 
        if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
-               queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
+               queue_delayed_work(system_dfl_wq, &tc->disconnect_phy_work,
                                   msecs_to_jiffies(1000));
 
        mutex_unlock(&tc->lock);
index 3a7e202ae87de9e45eac4f3a76892a5696c50735..56489cc127d64281911ec52aea1df903fc996e25 100644 (file)
@@ -408,7 +408,7 @@ static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 
        if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
                INIT_WORK(&copy_work->work, __memcpy_work);
-               queue_work(system_unbound_wq, &copy_work->work);
+               queue_work(system_dfl_wq, &copy_work->work);
        } else {
                init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
                irq_work_queue(&copy_work->irq_work);
index 52ec4421a211e12607ef1e5161fb67300d8edcaf..1c276444032368f6ebd93a372a66215a29dcf4a0 100644 (file)
@@ -624,7 +624,7 @@ int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
        else
                guc_err(guc, "Unknown crash notification: 0x%04X\n", action);
 
-       queue_work(system_unbound_wq, &guc->dead_guc_worker);
+       queue_work(system_dfl_wq, &guc->dead_guc_worker);
 
        return 0;
 }
@@ -646,7 +646,7 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
                guc_err(guc, "Received early exception notification!\n");
 
        if (msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | INTEL_GUC_RECV_MSG_EXCEPTION))
-               queue_work(system_unbound_wq, &guc->dead_guc_worker);
+               queue_work(system_dfl_wq, &guc->dead_guc_worker);
 
        return 0;
 }
index 8c4da526d461cff6a9707b1d27fb16bd2522daca..1c455d84bf9d880de5f5d355d83e4ad4107c337c 100644 (file)
@@ -31,7 +31,7 @@ static void ct_dead_ct_worker_func(struct work_struct *w);
        do { \
                if (!(ct)->dead_ct_reported) { \
                        (ct)->dead_ct_reason |= 1 << CT_DEAD_##reason; \
-                       queue_work(system_unbound_wq, &(ct)->dead_ct_worker); \
+                       queue_work(system_dfl_wq, &(ct)->dead_ct_worker); \
                } \
        } while (0)
 #else
@@ -1238,7 +1238,7 @@ static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *requ
        list_add_tail(&request->link, &ct->requests.incoming);
        spin_unlock_irqrestore(&ct->requests.lock, flags);
 
-       queue_work(system_unbound_wq, &ct->requests.worker);
+       queue_work(system_dfl_wq, &ct->requests.worker);
        return 0;
 }
 
index 13650ce1c7a681f668a90272d542d38e34bf321b..788e59cdfac9b80706acb722855b1413fe1f4ff3 100644 (file)
@@ -3385,7 +3385,7 @@ static void guc_context_sched_disable(struct intel_context *ce)
        } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
                   delay) {
                spin_unlock_irqrestore(&ce->guc_state.lock, flags);
-               mod_delayed_work(system_unbound_wq,
+               mod_delayed_work(system_dfl_wq,
                                 &ce->guc_state.sched_disable_delay_work,
                                 msecs_to_jiffies(delay));
        } else {
@@ -3611,7 +3611,7 @@ static void guc_context_destroy(struct kref *kref)
         * take the GT PM for the first time which isn't allowed from an atomic
         * context.
         */
-       queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
+       queue_work(system_dfl_wq, &guc->submission_state.destroyed_worker);
 }
 
 static int guc_context_alloc(struct intel_context *ce)
@@ -5380,7 +5380,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
         * A GT reset flushes this worker queue (G2H handler) so we must use
         * another worker to trigger a GT reset.
         */
-       queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
+       queue_work(system_dfl_wq, &guc->submission_state.reset_fail_worker);
 
        return 0;
 }
index 25c46d7b1ea7ce3213ebdf5bc1d4f85f79640a08..4f69d525494d31decc7fcc073e8a53a5f386bd93 100644 (file)
@@ -193,7 +193,7 @@ active_retire(struct i915_active *ref)
                return;
 
        if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
-               queue_work(system_unbound_wq, &ref->work);
+               queue_work(system_dfl_wq, &ref->work);
                return;
        }
 
index d2e56b387993dad5bba8a80e82d598730e572431..366418108f78f4470e36a8d877b1efce1d4ba639 100644 (file)
@@ -38,7 +38,7 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
                        if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
                                fence_work(&f->work);
                        else
-                               queue_work(system_unbound_wq, &f->work);
+                               queue_work(system_dfl_wq, &f->work);
                } else {
                        fence_complete(f);
                }
index 53d619ef0c3d6c252a8b0a333ca8db4a421c4f2e..a8f2112ce81f7f5468d3f7d18a4db189060d574e 100644 (file)
@@ -202,7 +202,7 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
                        i915_vma_resource_unbind_work(&vma_res->work);
                } else {
                        INIT_WORK(&vma_res->work, i915_vma_resource_unbind_work);
-                       queue_work(system_unbound_wq, &vma_res->work);
+                       queue_work(system_dfl_wq, &vma_res->work);
                }
                break;
        case FENCE_FREE:
index 2b63fb2cffd618cc26621fb73d6b845badf384da..3d7f045f662d078aa72a7e3c70909d7719ce5eb2 100644 (file)
@@ -278,7 +278,7 @@ static void pxp_queue_termination(struct intel_pxp *pxp)
        spin_lock_irq(gt->irq_lock);
        intel_pxp_mark_termination_in_progress(pxp);
        pxp->session_events |= PXP_TERMINATION_REQUEST;
-       queue_work(system_unbound_wq, &pxp->session_work);
+       queue_work(system_dfl_wq, &pxp->session_work);
        spin_unlock_irq(gt->irq_lock);
 }
 
index d81750b9bddaa2720768cb131d8b0a8595a215ab..735325e828bc99a493502febe2ddc69949714804 100644 (file)
@@ -48,7 +48,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
                pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ;
 
        if (pxp->session_events)
-               queue_work(system_unbound_wq, &pxp->session_work);
+               queue_work(system_dfl_wq, &pxp->session_work);
 }
 
 static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)