fs: replace use of system_unbound_wq with system_dfl_wq
author    Marco Crivellari <marco.crivellari@suse.com>
          Tue, 16 Sep 2025 08:29:04 +0000 (10:29 +0200)
committer Christian Brauner <brauner@kernel.org>
          Fri, 19 Sep 2025 14:15:07 +0000 (16:15 +0200)
Currently, if a user enqueues a work item using schedule_delayed_work(), the
workqueue used is "system_wq" (a per-cpu wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a CPU is not specified). The same applies to
schedule_work(), which uses system_wq, and queue_work(), which again makes
use of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue so as not to enforce
locality constraints for random work whenever it's not required.

Add system_dfl_wq to encourage its use whenever work should run unbound.

The old system_unbound_wq will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://lore.kernel.org/20250916082906.77439-2-marco.crivellari@suse.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
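
As a minimal sketch of the substitution this series performs (assuming the
system_dfl_wq symbol introduced earlier in the series; example_fn and
example_work are hypothetical names, not taken from this commit), work with
no locality requirement that previously targeted system_unbound_wq is simply
queued on system_dfl_wq instead:

    #include <linux/workqueue.h>

    /* Runs on an unbound worker; no CPU locality is assumed. */
    static void example_fn(struct work_struct *work)
    {
    }

    static DECLARE_WORK(example_work, example_fn);

    static void example_kick(void)
    {
            /* before: queue_work(system_unbound_wq, &example_work); */
            queue_work(system_dfl_wq, &example_work);
    }

Delayed work follows the same pattern, e.g.
queue_delayed_work(system_dfl_wq, &dwork, delay).
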
16 files changed:
fs/afs/callback.c
fs/afs/write.c
fs/bcachefs/btree_write_buffer.c
fs/bcachefs/io_read.c
fs/bcachefs/journal_io.c
fs/btrfs/block-group.c
fs/btrfs/extent_map.c
fs/btrfs/space-info.c
fs/btrfs/zoned.c
fs/coredump.c
fs/ext4/mballoc.c
fs/netfs/misc.c
fs/netfs/objects.c
fs/nfsd/filecache.c
fs/notify/mark.c
fs/quota/dquot.c

diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 69e1dd55b1601059a76763edb8bb147e3124972b..894d2bad6b6cec9ca4e70a198e967fae577e0156 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -42,7 +42,7 @@ static void afs_volume_init_callback(struct afs_volume *volume)
        list_for_each_entry(vnode, &volume->open_mmaps, cb_mmap_link) {
                if (vnode->cb_v_check != atomic_read(&volume->cb_v_break)) {
                        afs_clear_cb_promise(vnode, afs_cb_promise_clear_vol_init_cb);
-                       queue_work(system_unbound_wq, &vnode->cb_work);
+                       queue_work(system_dfl_wq, &vnode->cb_work);
                }
        }
 
@@ -90,7 +90,7 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
                if (reason != afs_cb_break_for_deleted &&
                    vnode->status.type == AFS_FTYPE_FILE &&
                    atomic_read(&vnode->cb_nr_mmap))
-                       queue_work(system_unbound_wq, &vnode->cb_work);
+                       queue_work(system_dfl_wq, &vnode->cb_work);
 
                trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
        } else {
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 2e7526ea883ae231935b301935f802a0ad9f0dfa..93ad86ff33453f58ce8051c573db13c3849bd8ed 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -172,7 +172,7 @@ static void afs_issue_write_worker(struct work_struct *work)
 void afs_issue_write(struct netfs_io_subrequest *subreq)
 {
        subreq->work.func = afs_issue_write_worker;
-       if (!queue_work(system_unbound_wq, &subreq->work))
+       if (!queue_work(system_dfl_wq, &subreq->work))
                WARN_ON_ONCE(1);
 }
 
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 4b095235a0d2210935fc555c552b2ff3b6d0556c..0afb44ce1a85628ee3fa21d88826ff81f765be38 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -827,7 +827,7 @@ int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_
 
        if (bch2_btree_write_buffer_should_flush(c) &&
            __enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_write_buffer) &&
-           !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work))
+           !queue_work(system_dfl_wq, &c->btree_write_buffer.flush_work))
                enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_write_buffer);
 
        if (dst->wb == &wb->flushing)
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index e0874ad9a6cf2402a49655182fd97b461e3303c5..460e2e6341f1ced54d8a4846bbedaa930befff74 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -684,7 +684,7 @@ static void bch2_rbio_error(struct bch_read_bio *rbio,
 
        if (bch2_err_matches(ret, BCH_ERR_data_read_retry)) {
                bch2_rbio_punt(rbio, bch2_rbio_retry,
-                              RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+                              RBIO_CONTEXT_UNBOUND, system_dfl_wq);
        } else {
                rbio = bch2_rbio_free(rbio);
 
@@ -921,10 +921,10 @@ csum_err:
        bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err, BLK_STS_IOERR);
        goto out;
 decompression_err:
-       bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+       bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_dfl_wq);
        goto out;
 decrypt_err:
-       bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+       bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_dfl_wq);
        goto out;
 }
 
@@ -963,7 +963,7 @@ static void bch2_read_endio(struct bio *bio)
            rbio->promote ||
            crc_is_compressed(rbio->pick.crc) ||
            bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
-               context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
+               context = RBIO_CONTEXT_UNBOUND, wq = system_dfl_wq;
        else if (rbio->pick.crc.csum_type)
                context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
 
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 9e028dbcc3d02d3a4838e8601b66b9038bd47c22..29bea8e0e495412192870d564f09f95791cfa81c 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1362,7 +1362,7 @@ int bch2_journal_read(struct bch_fs *c,
                                          BCH_DEV_READ_REF_journal_read))
                        closure_call(&ca->journal.read,
                                     bch2_journal_read_device,
-                                    system_unbound_wq,
+                                    system_dfl_wq,
                                     &jlist.cl);
                else
                        degraded = true;
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 9bf282d2453c02ddfe5eed5526dc79f3775d1c0c..9a0af7e4a935c5b753c6fdf7420111eaf132de52 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2031,7 +2031,7 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
        btrfs_reclaim_sweep(fs_info);
        spin_lock(&fs_info->unused_bgs_lock);
        if (!list_empty(&fs_info->reclaim_bgs))
-               queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
+               queue_work(system_dfl_wq, &fs_info->reclaim_bgs_work);
        spin_unlock(&fs_info->unused_bgs_lock);
 }
 
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 57f52585a6dde9074b97cc52ae304ea9581cb55c..9a5a497edc97ae636bd593c803c31f5c3af108fe 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1372,7 +1372,7 @@ void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
        if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
                return;
 
-       queue_work(system_unbound_wq, &fs_info->em_shrinker_work);
+       queue_work(system_dfl_wq, &fs_info->em_shrinker_work);
 }
 
 void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 0481c693ac2eafc2d9ca227b5cace959534558d2..c573d80550adf2c7fca58d7c5560430e04578345 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -1830,7 +1830,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
                                                          space_info->flags,
                                                          orig_bytes, flush,
                                                          "enospc");
-                               queue_work(system_unbound_wq, async_work);
+                               queue_work(system_dfl_wq, async_work);
                        }
                } else {
                        list_add_tail(&ticket.list,
@@ -1847,7 +1847,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
                    need_preemptive_reclaim(fs_info, space_info)) {
                        trace_btrfs_trigger_flush(fs_info, space_info->flags,
                                                  orig_bytes, flush, "preempt");
-                       queue_work(system_unbound_wq,
+                       queue_work(system_dfl_wq,
                                   &fs_info->preempt_reclaim_work);
                }
        }
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 245e813ecd785a337dbfa5384634e3f4cb864ad2..0f493db7ae44c2034d7fbb45d440a7ebc5926754 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -2488,7 +2488,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
        refcount_inc(&eb->refs);
        bg->last_eb = eb;
        INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
-       queue_work(system_unbound_wq, &bg->zone_finish_work);
+       queue_work(system_dfl_wq, &bg->zone_finish_work);
 }
 
 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
diff --git a/fs/coredump.c b/fs/coredump.c
index fedbead956ed16636c333eea7d5fdc96c5497503..b22f99cb6818ce48497a768ada2ec771456f5950 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -635,7 +635,7 @@ static int umh_coredump_setup(struct subprocess_info *info, struct cred *new)
 
                /*
                 * Usermode helpers are childen of either
-                * system_unbound_wq or of kthreadd. So we know that
+                * system_dfl_wq or of kthreadd. So we know that
                 * we're starting off with a clean file descriptor
                 * table. So we should always be able to use
                 * COREDUMP_PIDFD_NUMBER as our file descriptor value.
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5898d92ba19f14d4dc61bafc19617e5750672656..8b18802e83ebd2dc10d03fba16e22ebe69d0836b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3995,7 +3995,7 @@ void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
                list_splice_tail(&freed_data_list, &sbi->s_discard_list);
                spin_unlock(&sbi->s_md_lock);
                if (wake)
-                       queue_work(system_unbound_wq, &sbi->s_discard_work);
+                       queue_work(system_dfl_wq, &sbi->s_discard_work);
        } else {
                list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
                        kmem_cache_free(ext4_free_data_cachep, entry);
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 20748bcfbf59025ea7d3cce954a607bfb693b13e..486166460e177d52806b41b46cca8f325deb1ef4 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -321,7 +321,7 @@ void netfs_wake_collector(struct netfs_io_request *rreq)
 {
        if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
            !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
-               queue_work(system_unbound_wq, &rreq->work);
+               queue_work(system_dfl_wq, &rreq->work);
        } else {
                trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
                wake_up(&rreq->waitq);
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index e8c99738b5bbf26f48c6e02c418a05956a6c8385..2ebe56b24ddd10c6a6bcd6ae7399b1e33a57d697 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -163,7 +163,7 @@ void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace
                dead = __refcount_dec_and_test(&rreq->ref, &r);
                trace_netfs_rreq_ref(debug_id, r - 1, what);
                if (dead)
-                       WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work));
+                       WARN_ON(!queue_work(system_dfl_wq, &rreq->cleanup_work));
        }
 }
 
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 732abf6b92a569336865985e61906e9263efeec7..85ca663c052c1f642701c76755410e03149892b8 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -113,7 +113,7 @@ static void
 nfsd_file_schedule_laundrette(void)
 {
        if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
-               queue_delayed_work(system_unbound_wq, &nfsd_filecache_laundrette,
+               queue_delayed_work(system_dfl_wq, &nfsd_filecache_laundrette,
                                   NFSD_LAUNDRETTE_DELAY);
 }
 
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 798340db69d761dd05c1b361c251818dee89b9cf..55a03bb05aa118ad3480b67068d5cc42844436b8 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -428,7 +428,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
                conn->destroy_next = connector_destroy_list;
                connector_destroy_list = conn;
                spin_unlock(&destroy_lock);
-               queue_work(system_unbound_wq, &connector_reaper_work);
+               queue_work(system_dfl_wq, &connector_reaper_work);
        }
        /*
         * Note that we didn't update flags telling whether inode cares about
@@ -439,7 +439,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
        spin_lock(&destroy_lock);
        list_add(&mark->g_list, &destroy_list);
        spin_unlock(&destroy_lock);
-       queue_delayed_work(system_unbound_wq, &reaper_work,
+       queue_delayed_work(system_dfl_wq, &reaper_work,
                           FSNOTIFY_REAPER_DELAY);
 }
 EXPORT_SYMBOL_GPL(fsnotify_put_mark);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index df4a9b34876965553fb959691e1d58e05592f9d8..afa15a214538224722546538021682c667c93309 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -881,7 +881,7 @@ void dqput(struct dquot *dquot)
        put_releasing_dquots(dquot);
        atomic_dec(&dquot->dq_count);
        spin_unlock(&dq_list_lock);
-       queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
+       queue_delayed_work(system_dfl_wq, &quota_release_work, 1);
 }
 EXPORT_SYMBOL(dqput);