]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
fs: replace use of system_wq with system_percpu_wq
authorMarco Crivellari <marco.crivellari@suse.com>
Tue, 16 Sep 2025 08:29:05 +0000 (10:29 +0200)
committerChristian Brauner <brauner@kernel.org>
Fri, 19 Sep 2025 14:15:07 +0000 (16:15 +0200)
Currently if a user enqueues a work item using schedule_delayed_work() the
used wq is "system_wq" (per-cpu wq) while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
schedule_work() that is using system_wq and queue_work(), that makes use
again of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.

system_wq is a per-CPU workqueue, yet nothing in its name tells about that
CPU affinity constraint, which is very often not required by users.
Make it clear by adding a system_percpu_wq to all the fs subsystem.

The old wq will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://lore.kernel.org/20250916082906.77439-3-marco.crivellari@suse.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/aio.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/inode.c
fs/nfs/namespace.c
fs/nfs/nfs4renewd.c

index 7fc7b6221312c399e9c131a0a29c9d62f9073bfe..6002617f078c6f9fae25c332c2f724fcdfe94f86 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -636,7 +636,7 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 
        /* Synchronize against RCU protected table->table[] dereferences */
        INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
-       queue_rcu_work(system_wq, &ctx->free_rwork);
+       queue_rcu_work(system_percpu_wq, &ctx->free_rwork);
 }
 
 /*
index cc57367fb641d7bb448d65f132ef5734c3795422..cf51a265bf2712ddcfaa5aa1767e3da832e95126 100644 (file)
@@ -2442,7 +2442,7 @@ static int dirtytime_interval_handler(const struct ctl_table *table, int write,
 
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
-               mod_delayed_work(system_wq, &dirtytime_work, 0);
+               mod_delayed_work(system_percpu_wq, &dirtytime_work, 0);
        return ret;
 }
 
index e80cd8f2c049f9abd584f35922e6c0aeffad2913..8520eb94c527a3496714fe491adec42132142f4c 100644 (file)
@@ -119,7 +119,7 @@ void fuse_check_timeout(struct work_struct *work)
            goto abort_conn;
 
 out:
-       queue_delayed_work(system_wq, &fc->timeout.work,
+       queue_delayed_work(system_percpu_wq, &fc->timeout.work,
                           fuse_timeout_timer_freq);
        return;
 
index ecb869e895ab1d644d88868290f4a7dc69e36a2c..b12cd19a9bcae8b4b02cdcdbcfbf1253e77b6f7f 100644 (file)
@@ -1273,7 +1273,7 @@ static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout)
 {
        fc->timeout.req_timeout = secs_to_jiffies(timeout);
        INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
-       queue_delayed_work(system_wq, &fc->timeout.work,
+       queue_delayed_work(system_percpu_wq, &fc->timeout.work,
                           fuse_timeout_timer_freq);
 }
 
index 7f1ec9c67ff21de7e2ce39431526b8dee75e7f49..f9a3a1fbf44ce897b382b9c311226ed79412ebff 100644 (file)
@@ -335,7 +335,7 @@ static int param_set_nfs_timeout(const char *val, const struct kernel_param *kp)
                        num *= HZ;
                *((int *)kp->arg) = num;
                if (!list_empty(&nfs_automount_list))
-                       mod_delayed_work(system_wq, &nfs_automount_task, num);
+                       mod_delayed_work(system_percpu_wq, &nfs_automount_task, num);
        } else {
                *((int *)kp->arg) = -1*HZ;
                cancel_delayed_work(&nfs_automount_task);
index db3811af079691bf8524bd85783bb321edfc1fe7..18ae614e5a6c39157f8fbddc9a1e3d51f68cb760 100644 (file)
@@ -122,7 +122,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
                timeout = 5 * HZ;
        dprintk("%s: requeueing work. Lease period = %ld\n",
                        __func__, (timeout + HZ - 1) / HZ);
-       mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
+       mod_delayed_work(system_percpu_wq, &clp->cl_renewd, timeout);
        set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
        spin_unlock(&clp->cl_lock);
 }