RDMA/core: RDMA/mlx5: replace use of system_unbound_wq with system_dfl_wq
author    Marco Crivellari <marco.crivellari@suse.com>
          Sat, 1 Nov 2025 16:31:11 +0000 (17:31 +0100)
committer Leon Romanovsky <leon@kernel.org>
          Thu, 6 Nov 2025 07:23:23 +0000 (02:23 -0500)
Currently, if a user enqueues a work item using schedule_delayed_work(),
the workqueue used is "system_wq" (a per-CPU wq), while
queue_delayed_work() uses WORK_CPU_UNBOUND (used when a CPU is not
specified). The same applies to schedule_work(), which uses system_wq,
and to queue_work(), which again makes use of WORK_CPU_UNBOUND.
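
As an aside (not part of this patch), a minimal sketch of roughly how
these helpers are defined in include/linux/workqueue.h, illustrating the
inconsistency described above:

    /* schedule_work() always targets the per-cpu system_wq ... */
    static inline bool schedule_work(struct work_struct *work)
    {
            return queue_work(system_wq, work);
    }

    /* ... while queue_work() leaves the CPU unspecified via WORK_CPU_UNBOUND */
    static inline bool queue_work(struct workqueue_struct *wq,
                                  struct work_struct *work)
    {
            return queue_work_on(WORK_CPU_UNBOUND, wq, work);
    }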

This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue, so that locality
constraints are not imposed on random work when they are not required.

Add system_dfl_wq to encourage its use when unbound work is needed.

The old system_unbound_wq will be kept for a few release cycles.
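
For callers the conversion is mechanical: work with no CPU locality
requirement is simply queued on the new default workqueue, as the hunks
below show. An illustrative sketch (my_work is a placeholder name):

    /* unbound work with no locality requirement goes to the default wq */
    queue_work(system_dfl_wq, &my_work);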

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://patch.msgid.link/20251101163121.78400-2-marco.crivellari@suse.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/mlx5/odp.c

diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index f86ece701db6e081a06f76fa5026f690449624ec..ec3be65a2b889de61d1dcd9c8991bc5e490327ff 100644
@@ -366,7 +366,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
        if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
                xa_lock(&ctx_table);
                if (xa_load(&ctx_table, ctx->id) == ctx)
-                       queue_work(system_unbound_wq, &ctx->close_work);
+                       queue_work(system_dfl_wq, &ctx->close_work);
                xa_unlock(&ctx_table);
        }
        return 0;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 0e8ae85af5a625bb97b4b5f99036f8ac0d448dec..6441abdf1f3b688ab25174885a8b843e7afb65bd 100644
@@ -265,7 +265,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
 
        /* Freeing a MR is a sleeping operation, so bounce to a work queue */
        INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
-       queue_work(system_unbound_wq, &mr->odp_destroy.work);
+       queue_work(system_dfl_wq, &mr->odp_destroy.work);
 }
 
 static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
@@ -2093,6 +2093,6 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
                destroy_prefetch_work(work);
                return rc;
        }
-       queue_work(system_unbound_wq, &work->work);
+       queue_work(system_dfl_wq, &work->work);
        return 0;
 }