mm: replace use of system_unbound_wq with system_dfl_wq
author    Marco Crivellari <marco.crivellari@suse.com>
          Tue, 13 Jan 2026 11:46:28 +0000 (12:46 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
          Sat, 31 Jan 2026 22:22:39 +0000 (14:22 -0800)
Patch series "Replace wq users and add WQ_PERCPU to alloc_workqueue()
users", v2.

This series continues the effort to refactor the Workqueue API.  No
behavior changes are introduced by this series.

=== Recent changes to the WQ API ===

The following commits address the recent changes in the Workqueue API:

- commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
- commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")

The old workqueues will be removed in a future release cycle, and unbound
will become the implicit default.

=== Changes introduced by this series ===

1) [P 1-2] Replace use of system_wq and system_unbound_wq

    Workqueue users are converted to the better-named new workqueues, as
    sketched below:

        system_wq -> system_percpu_wq
        system_unbound_wq -> system_dfl_wq

    This way the old obsolete workqueues (system_wq, system_unbound_wq) can be
    removed in the future.
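
    As an illustration, a typical call-site conversion looks like this
    (a minimal sketch; my_work is a hypothetical work item, and the real
    hunks appear in the diff below):

        /* Before: queued on the old, ambiguously named unbound workqueue. */
        queue_work(system_unbound_wq, &my_work);

        /* After: same unbound behavior, under the clearer name. */
        queue_work(system_dfl_wq, &my_work);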

2) [P 3] Add WQ_PERCPU to remaining alloc_workqueue() users

    With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND),
    any alloc_workqueue() caller that doesn’t explicitly specify WQ_UNBOUND
    must now use WQ_PERCPU.

    WQ_UNBOUND will be removed in a future release.
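
    For illustration, a caller relying on the implicit per-CPU default
    would change along these lines (a sketch; "example_wq" and its flags
    are hypothetical, not taken from this series):

        /* Before: per-CPU only implicitly, since WQ_UNBOUND is absent. */
        wq = alloc_workqueue("example_wq", WQ_FREEZABLE, 0);

        /* After: per-CPU behavior made explicit with WQ_PERCPU. */
        wq = alloc_workqueue("example_wq", WQ_FREEZABLE | WQ_PERCPU, 0);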

For more information:
    https://lore.kernel.org/all/20250221112003.1dSuoGyc@linutronix.de/

This patch (of 3):

This patch continues the effort to refactor the workqueue API, which began
with the changes that introduced new workqueues and a new alloc_workqueue()
flag:

   commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
   commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")

The goal of the refactoring is to eventually make workqueues unbound by
default, so that their workload placement can be optimized by the
scheduler.

Before that can happen, workqueue users must be converted to the
better-named new workqueues, with no intended behavior changes:

   system_wq -> system_percpu_wq
   system_unbound_wq -> system_dfl_wq

This way the old obsolete workqueues (system_wq, system_unbound_wq) can be
removed in the future.

Link: https://lkml.kernel.org/r/20260113114630.152942-1-marco.crivellari@suse.com
Link: https://lore.kernel.org/all/20250221112003.1dSuoGyc@linutronix.de/
Link: https://lkml.kernel.org/r/20260113114630.152942-2-marco.crivellari@suse.com
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Suggested-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Marco Elver <elver@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/backing-dev.c
mm/kfence/core.c
mm/memcontrol.c

diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index a0e26d1b717f55b19cadb4dee58f205e1bf1dfe2..0e315f7707559cb703ec7b560ce468c33b111a5f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -939,7 +939,7 @@ void wb_memcg_offline(struct mem_cgroup *memcg)
        memcg_cgwb_list->next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
 
-       queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
+       queue_work(system_dfl_wq, &cleanup_offline_cgwbs_work);
 }
 
 /**
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4f79ec72075254666bf0e0d34798b1f4bef3e2b7..1b779cee6ca2f840087d0a62d2f18db75cc1037b 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -900,7 +900,7 @@ static void toggle_allocation_gate(struct work_struct *work)
        /* Disable static key and reset timer. */
        static_branch_disable(&kfence_allocation_key);
 #endif
-       queue_delayed_work(system_unbound_wq, &kfence_timer,
+       queue_delayed_work(system_dfl_wq, &kfence_timer,
                           msecs_to_jiffies(kfence_sample_interval));
 }
 
@@ -950,7 +950,7 @@ static void kfence_init_enable(void)
 #endif
 
        WRITE_ONCE(kfence_enabled, true);
-       queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+       queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
 
        pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
                CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
@@ -1046,7 +1046,7 @@ static int kfence_enable_late(void)
                return kfence_init_late();
 
        WRITE_ONCE(kfence_enabled, true);
-       queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+       queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
        pr_info("re-enabled\n");
        return 0;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7d6cf47e6d4cbc238d6b4bc70e5038c2f800ac53..21d17975c4ace2f716d3d84d24211ca6c6ee4416 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -644,7 +644,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
         * in latency-sensitive paths is as cheap as possible.
         */
        __mem_cgroup_flush_stats(root_mem_cgroup, true);
-       queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
+       queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME);
 }
 
 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
@@ -3841,7 +3841,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
                goto offline_kmem;
 
        if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
-               queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
+               queue_delayed_work(system_dfl_wq, &stats_flush_dwork,
                                   FLUSH_TIME);
        lru_gen_online_memcg(memcg);