From: Marco Crivellari
Date: Fri, 5 Sep 2025 08:53:08 +0000 (+0200)
Subject: bpf: replace use of system_unbound_wq with system_dfl_wq
X-Git-Tag: v6.18-rc1~175^2~55^2~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=0409819a002165e9471113598323eb72bba17a53;p=thirdparty%2Fkernel%2Fstable.git

bpf: replace use of system_unbound_wq with system_dfl_wq

Currently, if a user enqueues a work item using schedule_delayed_work(),
the wq used is "system_wq" (a per-CPU wq), while queue_delayed_work()
uses WORK_CPU_UNBOUND (used when a CPU is not specified). The same
applies to schedule_work(), which uses system_wq, and queue_work(),
which again makes use of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the
API.

system_unbound_wq should be the default workqueue so as not to enforce
locality constraints for random work whenever it's not required.

Add system_dfl_wq to encourage its use when unbound work should be used.

queue_work() / queue_delayed_work() / mod_delayed_work() will now use
the new unbound wq: if the user still uses the old wq, a warning will
be printed along with a redirect to the new one. The old
system_unbound_wq will be kept for a few release cycles.

Suggested-by: Tejun Heo
Signed-off-by: Marco Crivellari
Link: https://lore.kernel.org/r/20250905085309.94596-3-marco.crivellari@suse.com
Signed-off-by: Alexei Starovoitov
---

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 588bc7e364367..1ef1e65bd7d0e 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1594,7 +1594,7 @@ void bpf_timer_cancel_and_free(void *val)
 	 * timer callback.
 	 */
 	if (this_cpu_read(hrtimer_running)) {
-		queue_work(system_unbound_wq, &t->cb.delete_work);
+		queue_work(system_dfl_wq, &t->cb.delete_work);
 		return;
 	}
 
@@ -1607,7 +1607,7 @@ void bpf_timer_cancel_and_free(void *val)
 		if (hrtimer_try_to_cancel(&t->timer) >= 0)
 			kfree_rcu(t, cb.rcu);
 		else
-			queue_work(system_unbound_wq, &t->cb.delete_work);
+			queue_work(system_dfl_wq, &t->cb.delete_work);
 	} else {
 		bpf_timer_delete_work(&t->cb.delete_work);
 	}
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 889374722d0aa..bd45dda9dc354 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -736,7 +736,7 @@ static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
 	/* Defer barriers into worker to let the rest of map memory to be freed */
 	memset(ma, 0, sizeof(*ma));
 	INIT_WORK(&copy->work, free_mem_alloc_deferred);
-	queue_work(system_unbound_wq, &copy->work);
+	queue_work(system_dfl_wq, &copy->work);
 }
 
 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0fbfa8532c392..3f178a0f8eb12 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -905,7 +905,7 @@ static void bpf_map_free_in_work(struct bpf_map *map)
 	/* Avoid spawning kworkers, since they all might contend
 	 * for the same mutex like slab_mutex.
 	 */
-	queue_work(system_unbound_wq, &map->work);
+	queue_work(system_dfl_wq, &map->work);
 }
 
 static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
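
Note (not part of the patch above): a minimal sketch of how a caller
queues unbound work after this change, assuming a kernel that provides
system_dfl_wq. The work item, work function, and enqueue helper names
below are hypothetical, for illustration only.

	#include <linux/workqueue.h>

	/* Hypothetical work function: it runs on an unbound kworker,
	 * so no CPU locality constraint is imposed on it.
	 */
	static void example_fn(struct work_struct *work)
	{
	}

	static DECLARE_WORK(example_work, example_fn);

	static void example_enqueue(void)
	{
		/* Before this series: queue_work(system_unbound_wq, &example_work); */
		queue_work(system_dfl_wq, &example_work);
	}

Queueing on the old system_unbound_wq still works for a few release
cycles, but per the commit message it will emit a warning and redirect
to the new wq.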