From 0bcbd7cf6596826cfb0ca653f47fb9e9410b3f2e Mon Sep 17 00:00:00 2001
From: Marco Crivellari
Date: Tue, 13 Jan 2026 12:46:28 +0100
Subject: [PATCH] mm: replace use of system_unbound_wq with system_dfl_wq
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Patch series "Replace wq users and add WQ_PERCPU to alloc_workqueue() users", v2.

This series continues the effort to refactor the Workqueue API; it
introduces no behavior changes.

=== Recent changes to the WQ API ===

The following commits introduced the recent changes to the Workqueue API:

- commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
- commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")

The old workqueues will be removed in a future release cycle and unbound
will become the implicit default.

=== Changes introduced by this series ===

1) [P 1-2] Replace use of system_wq and system_unbound_wq

   Workqueue users are converted to the better named new workqueues:

       system_wq -> system_percpu_wq
       system_unbound_wq -> system_dfl_wq

   This way the old obsolete workqueues (system_wq, system_unbound_wq)
   can be removed in the future.

2) [P 3] Add WQ_PERCPU to remaining alloc_workqueue() users

   With the introduction of the WQ_PERCPU flag (equivalent to
   !WQ_UNBOUND), any alloc_workqueue() caller that does not explicitly
   specify WQ_UNBOUND must now pass WQ_PERCPU. WQ_UNBOUND will be
   removed in the future.

For more information:
https://lore.kernel.org/all/20250221112003.1dSuoGyc@linutronix.de/

This patch (of 3):

This patch continues the effort to refactor workqueue APIs, which began
with the changes introducing the new workqueues and a new
alloc_workqueue() flag:

    commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
    commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")

The point of the refactoring is to eventually make workqueues unbound by
default, so that their workload placement is optimized by the scheduler.
Before that can happen, workqueue users must be converted to the better
named new workqueues, with no intended behaviour changes:

    system_wq -> system_percpu_wq
    system_unbound_wq -> system_dfl_wq

This way the old obsolete workqueues (system_wq, system_unbound_wq) can
be removed in the future.
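For illustration only (this sketch is not part of the diff below), a
minimal, hypothetical module showing both conversions. The module and
symbol names (example_wq, example_work, example_work_fn) are made up;
system_dfl_wq, system_percpu_wq and WQ_PERCPU are the symbols introduced
by the commits referenced above:

    /* Hypothetical example module: names are illustrative only. */
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static void example_work_fn(struct work_struct *work)
    {
            /* No locality requirement: let the scheduler place this work. */
    }
    static DECLARE_WORK(example_work, example_work_fn);

    static struct workqueue_struct *example_wq;

    static int __init example_init(void)
    {
            /* Was: queue_work(system_unbound_wq, &example_work); */
            queue_work(system_dfl_wq, &example_work);

            /*
             * Callers that rely on per-CPU execution must now say so
             * explicitly with WQ_PERCPU, since unbound will become the
             * implicit default and WQ_UNBOUND will be removed.
             */
            example_wq = alloc_workqueue("example_wq", WQ_PERCPU, 0);
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            flush_work(&example_work);
            destroy_workqueue(example_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");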
Howlett" Cc: Lorenzo Stoakes Cc: Marco Elver Cc: Michal Hocko Cc: Mike Rapoport Cc: Muchun Song Cc: Roman Gushchin Cc: Shakeel Butt Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/backing-dev.c | 2 +- mm/kfence/core.c | 6 +++--- mm/memcontrol.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mm/backing-dev.c b/mm/backing-dev.c index a0e26d1b717f5..0e315f7707559 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -939,7 +939,7 @@ void wb_memcg_offline(struct mem_cgroup *memcg) memcg_cgwb_list->next = NULL; /* prevent new wb's */ spin_unlock_irq(&cgwb_lock); - queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work); + queue_work(system_dfl_wq, &cleanup_offline_cgwbs_work); } /** diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 4f79ec7207525..1b779cee6ca2f 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -900,7 +900,7 @@ static void toggle_allocation_gate(struct work_struct *work) /* Disable static key and reset timer. */ static_branch_disable(&kfence_allocation_key); #endif - queue_delayed_work(system_unbound_wq, &kfence_timer, + queue_delayed_work(system_dfl_wq, &kfence_timer, msecs_to_jiffies(kfence_sample_interval)); } @@ -950,7 +950,7 @@ static void kfence_init_enable(void) #endif WRITE_ONCE(kfence_enabled, true); - queue_delayed_work(system_unbound_wq, &kfence_timer, 0); + queue_delayed_work(system_dfl_wq, &kfence_timer, 0); pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool, @@ -1046,7 +1046,7 @@ static int kfence_enable_late(void) return kfence_init_late(); WRITE_ONCE(kfence_enabled, true); - queue_delayed_work(system_unbound_wq, &kfence_timer, 0); + queue_delayed_work(system_dfl_wq, &kfence_timer, 0); pr_info("re-enabled\n"); return 0; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7d6cf47e6d4cb..21d17975c4ace 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -644,7 +644,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w) * in latency-sensitive paths is as cheap as possible. */ __mem_cgroup_flush_stats(root_mem_cgroup, true); - queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME); + queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME); } unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) @@ -3841,7 +3841,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) goto offline_kmem; if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled()) - queue_delayed_work(system_unbound_wq, &stats_flush_dwork, + queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME); lru_gen_online_memcg(memcg); -- 2.47.3