]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
workqueue: Add system_dfl_long_wq for long unbound works
authorMarco Crivellari <marco.crivellari@suse.com>
Mon, 9 Mar 2026 13:15:28 +0000 (14:15 +0100)
committerTejun Heo <tj@kernel.org>
Mon, 9 Mar 2026 16:45:08 +0000 (06:45 -1000)
Currently there are users of queue_delayed_work() who specify
system_long_wq, the per-cpu workqueue. This workqueue should
be used for long per-cpu works, but queue_delayed_work()
queues the work using:

  queue_delayed_work_on(WORK_CPU_UNBOUND, ...);

This would end up calling __queue_delayed_work() that does:

if (housekeeping_enabled(HK_TYPE_TIMER)) {
// [....]
} else {
if (likely(cpu == WORK_CPU_UNBOUND))
add_timer_global(timer);
else
add_timer_on(timer, cpu);
}

So when cpu == WORK_CPU_UNBOUND the timer is global and is
not using a specific CPU. Later, when __queue_work() is called:

if (req_cpu == WORK_CPU_UNBOUND) {
if (wq->flags & WQ_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
else
cpu = raw_smp_processor_id();
}

Because the wq is not unbound, it takes the CPU where the timer
fired and enqueues the work on that CPU.
The consequence of all of this is that the work can run anywhere,
depending on where the timer fired.

Introduce system_dfl_long_wq so that, in a future step, users
that are still calling:

  queue_delayed_work(system_long_wq, ...);

can be switched over to the new system_dfl_long_wq, so that the
work may benefit from scheduler task placement.

Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
include/linux/workqueue.h
kernel/workqueue.c

index fc5744402a66976e4610a5d1b4f4df5d33f17875..8e0855d56e740d1f59d11ab047a5319a3a76988c 100644 (file)
@@ -440,6 +440,9 @@ enum wq_consts {
  * system_long_wq is similar to system_percpu_wq but may host long running
  * works.  Queue flushing might take relatively long.
  *
+ * system_dfl_long_wq is similar to system_dfl_wq but it may host long running
+ * works.
+ *
  * system_dfl_wq is unbound workqueue.  Workers are not bound to
  * any specific CPU, not concurrency managed, and all queued works are
  * executed immediately as long as max_active limit is not reached and
@@ -468,6 +471,7 @@ extern struct workqueue_struct *system_power_efficient_wq;
 extern struct workqueue_struct *system_freezable_power_efficient_wq;
 extern struct workqueue_struct *system_bh_wq;
 extern struct workqueue_struct *system_bh_highpri_wq;
+extern struct workqueue_struct *system_dfl_long_wq;
 
 void workqueue_softirq_action(bool highpri);
 void workqueue_softirq_dead(unsigned int cpu);
@@ -783,6 +787,8 @@ extern void __warn_flushing_systemwide_wq(void)
             _wq == system_highpri_wq) ||                               \
            (__builtin_constant_p(_wq == system_long_wq) &&             \
             _wq == system_long_wq) ||                                  \
+           (__builtin_constant_p(_wq == system_dfl_long_wq) &&         \
+            _wq == system_dfl_long_wq) ||                                      \
            (__builtin_constant_p(_wq == system_dfl_wq) &&              \
             _wq == system_dfl_wq) ||                           \
            (__builtin_constant_p(_wq == system_freezable_wq) &&        \
index 2f95cb0d2f1b8f220eb81867f1ad0bf4c8725ebf..2d8ff903f11346598dc28a9fcfa1b8418cba2d69 100644 (file)
@@ -530,6 +530,8 @@ struct workqueue_struct *system_bh_wq;
 EXPORT_SYMBOL_GPL(system_bh_wq);
 struct workqueue_struct *system_bh_highpri_wq;
 EXPORT_SYMBOL_GPL(system_bh_highpri_wq);
+struct workqueue_struct *system_dfl_long_wq __ro_after_init;
+EXPORT_SYMBOL_GPL(system_dfl_long_wq);
 
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
@@ -7954,11 +7956,12 @@ void __init workqueue_init_early(void)
        system_bh_wq = alloc_workqueue("events_bh", WQ_BH | WQ_PERCPU, 0);
        system_bh_highpri_wq = alloc_workqueue("events_bh_highpri",
                                               WQ_BH | WQ_HIGHPRI | WQ_PERCPU, 0);
+       system_dfl_long_wq = alloc_workqueue("events_dfl_long", WQ_UNBOUND, WQ_MAX_ACTIVE);
        BUG_ON(!system_wq || !system_percpu_wq|| !system_highpri_wq || !system_long_wq ||
               !system_unbound_wq || !system_freezable_wq || !system_dfl_wq ||
               !system_power_efficient_wq ||
               !system_freezable_power_efficient_wq ||
-              !system_bh_wq || !system_bh_highpri_wq);
+              !system_bh_wq || !system_bh_highpri_wq || !system_dfl_long_wq);
 }
 
 static void __init wq_cpu_intensive_thresh_init(void)