scsi: target: Add WQ_PERCPU to alloc_workqueue() users
author     Marco Crivellari <marco.crivellari@suse.com>
           Fri, 7 Nov 2025 15:40:08 +0000 (16:40 +0100)
committer  Martin K. Petersen <martin.petersen@oracle.com>
           Thu, 13 Nov 2025 02:28:27 +0000 (21:28 -0500)
Currently, if a user enqueues a work item using schedule_delayed_work(),
the workqueue used is "system_wq" (a per-cpu wq), while
queue_delayed_work() uses WORK_CPU_UNBOUND (used when a CPU is not
specified). The same applies to schedule_work(), which uses system_wq,
and queue_work(), which again makes use of WORK_CPU_UNBOUND. This lack
of consistency cannot be addressed without refactoring the API.
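
As a sketch of the two paths described above (paraphrased from
include/linux/workqueue.h; simplified, not the verbatim kernel source):

    /* schedule_work() always targets the per-cpu system_wq ... */
    static inline bool schedule_work(struct work_struct *work)
    {
            return queue_work(system_wq, work);
    }

    /* ... while queue_work() passes WORK_CPU_UNBOUND, which means
     * "no specific CPU requested", not "unbound workqueue". */
    static inline bool queue_work(struct workqueue_struct *wq,
                                  struct work_struct *work)
    {
            return queue_work_on(WORK_CPU_UNBOUND, wq, work);
    }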

alloc_workqueue() treats all queues as per-CPU by default, while unbound
workqueues must opt in via WQ_UNBOUND.

This default is suboptimal: most workloads benefit from unbound queues,
allowing the scheduler to place worker threads where they’re needed and
reducing noise when CPUs are isolated.
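
For illustration, under the new flags a caller chooses the behavior
explicitly (the queue names here are hypothetical):

    /* Workers pinned to a CPU, as before. */
    percpu_wq  = alloc_workqueue("example_percpu", WQ_PERCPU, 0);

    /* Workers placed wherever the scheduler prefers. */
    unbound_wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 0);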

This continues the effort to refactor workqueue APIs, which began with
the introduction of new workqueues and a new alloc_workqueue flag in:

commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")

This change adds the WQ_PERCPU flag to the alloc_workqueue() callers in
the SCSI target code to explicitly request per-cpu behavior where
WQ_UNBOUND has not been specified.

With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND),
any alloc_workqueue() caller that doesn’t explicitly specify WQ_UNBOUND
must now use WQ_PERCPU.

Once migration is complete, WQ_UNBOUND can be removed and unbound will
become the implicit default.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://patch.msgid.link/20251107154008.304127-1-marco.crivellari@suse.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/target/target_core_transport.c
drivers/target/target_core_xcopy.c
drivers/target/tcm_fc/tfc_conf.c

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0a76bdfe55282073c6cb423051d279d7ff1693b7..ca571076c15b19b45a442f16092de29ee154e910 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -126,12 +126,12 @@ int init_se_kmem_caches(void)
        }
 
        target_completion_wq = alloc_workqueue("target_completion",
-                                              WQ_MEM_RECLAIM, 0);
+                                              WQ_MEM_RECLAIM | WQ_PERCPU, 0);
        if (!target_completion_wq)
                goto out_free_lba_map_mem_cache;
 
        target_submission_wq = alloc_workqueue("target_submission",
-                                              WQ_MEM_RECLAIM, 0);
+                                              WQ_MEM_RECLAIM | WQ_PERCPU, 0);
        if (!target_submission_wq)
                goto out_free_completion_wq;
 
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 877ce58c0a708d2621660686706225f8cfb864ae..93534a6e14b7d0af2dd14f0cbc13c0ba8d2eee6f 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -462,7 +462,7 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
 
 int target_xcopy_setup_pt(void)
 {
-       xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+       xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
        if (!xcopy_wq) {
                pr_err("Unable to allocate xcopy_wq\n");
                return -ENOMEM;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 639fc358ed0fdb3b425d68211af19ed79170bd0d..f686d95d327323e50641e0485a971b96392c9936 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -250,7 +250,7 @@ static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
        tpg->lport_wwn = ft_wwn;
        INIT_LIST_HEAD(&tpg->lun_list);
 
-       wq = alloc_workqueue("tcm_fc", 0, 1);
+       wq = alloc_workqueue("tcm_fc", WQ_PERCPU, 1);
        if (!wq) {
                kfree(tpg);
                return NULL;