]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
bpf: replace use of system_unbound_wq with system_dfl_wq
authorMarco Crivellari <marco.crivellari@suse.com>
Fri, 5 Sep 2025 08:53:08 +0000 (10:53 +0200)
committerAlexei Starovoitov <ast@kernel.org>
Mon, 8 Sep 2025 17:04:37 +0000 (10:04 -0700)
Currently, if a user enqueues a work item using schedule_delayed_work(), the
wq used is "system_wq" (a per-cpu wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
schedule_work(), which uses system_wq, and to queue_work(), which again
uses WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue so as not to enforce
locality constraints for random work whenever it's not required.

Adding system_dfl_wq to encourage its use when unbound work should be used.

queue_work() / queue_delayed_work() / mod_delayed_work() will now use the
new unbound wq: if the user still uses the old wq, a warning will be
printed along with a redirect to the new wq.

The old system_unbound_wq will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://lore.kernel.org/r/20250905085309.94596-3-marco.crivellari@suse.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/helpers.c
kernel/bpf/memalloc.c
kernel/bpf/syscall.c

index 588bc7e36436799c0cfdb926caf4cefd9aee9933..1ef1e65bd7d0e95cf33ac5cff060d0d34720876e 100644 (file)
@@ -1594,7 +1594,7 @@ void bpf_timer_cancel_and_free(void *val)
         * timer callback.
         */
        if (this_cpu_read(hrtimer_running)) {
-               queue_work(system_unbound_wq, &t->cb.delete_work);
+               queue_work(system_dfl_wq, &t->cb.delete_work);
                return;
        }
 
@@ -1607,7 +1607,7 @@ void bpf_timer_cancel_and_free(void *val)
                if (hrtimer_try_to_cancel(&t->timer) >= 0)
                        kfree_rcu(t, cb.rcu);
                else
-                       queue_work(system_unbound_wq, &t->cb.delete_work);
+                       queue_work(system_dfl_wq, &t->cb.delete_work);
        } else {
                bpf_timer_delete_work(&t->cb.delete_work);
        }
index 889374722d0aa903fa72592fd5b010929ec662ec..bd45dda9dc354cbd97f11e414e4a1d28ed6933dc 100644 (file)
@@ -736,7 +736,7 @@ static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
        /* Defer barriers into worker to let the rest of map memory to be freed */
        memset(ma, 0, sizeof(*ma));
        INIT_WORK(&copy->work, free_mem_alloc_deferred);
-       queue_work(system_unbound_wq, &copy->work);
+       queue_work(system_dfl_wq, &copy->work);
 }
 
 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
index 0fbfa8532c392ca994e09e3d15b4ec1f747fa827..3f178a0f8eb12c24d68b95d53114c1749d5a3b2a 100644 (file)
@@ -905,7 +905,7 @@ static void bpf_map_free_in_work(struct bpf_map *map)
        /* Avoid spawning kworkers, since they all might contend
         * for the same mutex like slab_mutex.
         */
-       queue_work(system_unbound_wq, &map->work);
+       queue_work(system_dfl_wq, &map->work);
 }
 
 static void bpf_map_free_rcu_gp(struct rcu_head *rcu)