// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <linux/mmu_context.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};

/*
 * One for each thread in a wq pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wq *wq;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
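/*
 * Hashed work is spread over IO_WQ_NR_HASH_BUCKETS buckets (64 on 64-bit,
 * 32 on 32-bit kernels); the bucket index is carried in the upper bits of
 * work->flags, above IO_WQ_HASH_SHIFT (see io_wq_hash_work() below).
 */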
struct io_wq_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wq_acct acct[IO_WQ_ACCT_NR];

	/* lock protects access to elements below */
	raw_spinlock_t lock;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, int index);
static void io_wq_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
	return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
						  struct io_wq_work *work)
{
	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND);
}

static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}

bool io_wq_worker_stopped(void)
{
	struct io_worker *worker = current->worker_private;

	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
		return true;

	return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
}

static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&wq->lock);
	acct->nr_workers--;
	raw_spin_unlock(&wq->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wq *wq = worker->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wq->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wq->lock);
	io_wq_dec_running(worker);
	/*
	 * this worker is a goner, clear ->worker_private to avoid any
	 * inc/dec running calls that could happen as part of exit from
	 * touching 'worker'.
	 */
	current->worker_private = NULL;

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wq);
	do_exit(0);
}

static inline bool __io_acct_run_queue(struct io_wq_acct *acct)
{
	return !test_bit(IO_ACCT_STALLED_BIT, &acct->flags) &&
		!wq_list_empty(&acct->work_list);
}

/*
 * If there's work to do, returns true with acct->lock acquired. If not,
 * returns false with no lock held.
 */
static inline bool io_acct_run_queue(struct io_wq_acct *acct)
	__acquires(&acct->lock)
{
	raw_spin_lock(&acct->lock);
	if (__io_acct_run_queue(acct))
		return true;

	raw_spin_unlock(&acct->lock);
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wq_activate_free_worker(struct io_wq *wq,
					struct io_wq_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wq_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		/*
		 * If the worker is already running, it's either already
		 * starting work or finishing work. In either case, if it does
		 * go to sleep, we'll kick off a new task for this work anyway.
		 */
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wq->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wq->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wq->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	return create_io_worker(wq, acct->index);
}

static void io_wq_inc_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;

	struct io_wq_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wq = worker->wq;
	acct = &wq->acct[worker->create_index];
	raw_spin_lock(&wq->lock);

	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wq->lock);
	if (do_create) {
		create_io_worker(wq, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wq_acct *acct,
				   task_work_func_t func)
{
	struct io_wq *wq = worker->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}

static void io_wq_dec_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	raw_spin_unlock(&acct->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wq->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wq->lock);
	}
}

/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
	__must_hold(wq->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
{
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wq->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wq->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wq->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}
static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wq *wq = worker->wq;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wq->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wq->hash->map)) {
			wq->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wq, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wq->hash->wait))
				wake_up(&wq->hash->wait);
		}
	}

	return NULL;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_run_task_work();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}
/*
 * Called with acct->lock held, drops it before returning
 */
static void io_worker_handle_work(struct io_wq_acct *acct,
				  struct io_worker *worker)
	__releases(&acct->lock)
{
	struct io_wq *wq = worker->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(acct, worker);
		raw_spin_unlock(&acct->lock);
		if (work) {
			__io_worker_busy(wq, worker);

			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		} else {
			break;
		}
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wq_enqueue(wq, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);

		if (!__io_acct_run_queue(acct))
			break;
		raw_spin_lock(&acct->lock);
	} while (1);
}
static int io_wq_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool exit_mask = false, last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * If we have work to do, io_acct_run_queue() returns with
		 * the acct->lock held. If not, it will drop it.
		 */
		while (io_acct_run_queue(acct))
			io_worker_handle_work(acct, worker);

		raw_spin_lock(&wq->lock);
		/*
		 * Last sleep timed out. Exit if we're not the last worker,
		 * or if someone modified our affinity.
		 */
		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
			acct->nr_workers--;
			raw_spin_unlock(&wq->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wq, worker);
		raw_spin_unlock(&wq->lock);
		if (io_run_task_work())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		if (!ret) {
			last_timeout = true;
			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
							wq->cpu_mask);
		}
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) && io_acct_run_queue(acct))
		io_worker_handle_work(acct, worker);

	io_worker_exit(worker);
	return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wq_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wq_dec_running(worker);
}

static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wq->cpu_mask);

	raw_spin_lock(&wq->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	list_add_tail_rcu(&worker->all_list, &wq->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wq->lock);
	wake_up_new_task(tsk);
}
static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}

static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wq *wq;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wq = worker->wq;
	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wq_acct *acct = io_wq_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wq->lock);
			while (io_acct_cancel_pending_work(wq, acct, &match))
				;
		} else {
			raw_spin_unlock(&wq->lock);
		}
		io_worker_ref_put(wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}

static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}
static bool create_io_worker(struct io_wq *wq, int index)
{
	struct io_wq_acct *acct = &wq->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wq->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wq = wq;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wq *wq,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wq->hash_tail[hash];
	wq->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wq);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wq_insert_work(wq, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	rcu_read_lock();
	do_create = !io_wq_activate_free_worker(wq, acct);
	rcu_read_unlock();

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wq_create_worker(wq, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wq->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wq->lock);
			return;
		}
		raw_spin_unlock(&wq->lock);

		/* fatal condition, failed to create the first worker */
		match.fn		= io_wq_work_match_item,
		match.data		= work,
		match.cancel_all	= false,

		io_acct_cancel_pending_work(wq, acct, &match);
	}
}
/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
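/*
 * Illustrative sketch (not part of io-wq): a caller that wants writes to the
 * same file to run serially can hash the work on the inode before queueing
 * it. The wq/work/inode arguments here are hypothetical placeholders; only
 * io_wq_hash_work() and io_wq_enqueue() are real io-wq entry points.
 */
#if 0
static void example_queue_serialized_write(struct io_wq *wq,
					   struct io_wq_work *work,
					   struct inode *inode)
{
	/* all work hashed on the same inode pointer runs one item at a time */
	io_wq_hash_work(work, inode);
	io_wq_enqueue(wq, work);
}
#endif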
static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wq_remove_pending(struct io_wq *wq,
					struct io_wq_work *work,
					struct io_wq_work_node *prev)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wq->hash_tail[hash] = prev_work;
		else
			wq->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}

static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wq_remove_pending(wq, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wq);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}

static void io_wq_cancel_pending_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = io_get_acct(wq, i == 0);

		if (io_acct_cancel_pending_work(wq, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}

static void io_wq_cancel_running_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the wq->lock, to ensure that
	 * we'll find a work item regardless of state.
	 */
	io_wq_cancel_pending_work(wq, &match);
	if (match.nr_pending && !match.cancel_all)
		return IO_WQ_CANCEL_OK;

	raw_spin_lock(&wq->lock);
	io_wq_cancel_running_work(wq, &match);
	raw_spin_unlock(&wq->lock);
	if (match.nr_running && !match.cancel_all)
		return IO_WQ_CANCEL_RUNNING;

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
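/*
 * Illustrative sketch (not part of io-wq): cancelling every queued or running
 * work item that belongs to a particular submitter. The match callback and
 * example_owner() are hypothetical placeholders; io_wq_cancel_cb() is the
 * real entry point and reports whether the work was pending, running or not
 * found.
 */
#if 0
static bool example_match_owner(struct io_wq_work *work, void *data)
{
	/* hypothetical: compare the work's owner cookie against @data */
	return example_owner(work) == data;
}

static void example_cancel_all_for(struct io_wq *wq, void *owner)
{
	/* cancel_all=true keeps scanning after the first match */
	io_wq_cancel_cb(wq, example_match_owner, owner, true);
}
#endif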
static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			   int sync, void *key)
{
	struct io_wq *wq = container_of(wait, struct io_wq, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wq_activate_free_worker(wq, acct);
	}
	rcu_read_unlock();
	return 1;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;

	if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
		goto err;
	cpumask_copy(wq->cpu_mask, cpu_possible_mask);
	wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
	wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
				task_rlimit(current, RLIMIT_NPROC);
	INIT_LIST_HEAD(&wq->wait.entry);
	wq->wait.func = io_wq_hash_wake;
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		acct->index = i;
		atomic_set(&acct->nr_running, 0);
		INIT_WQ_LIST(&acct->work_list);
		raw_spin_lock_init(&acct->lock);
	}

	raw_spin_lock_init(&wq->lock);
	INIT_HLIST_NULLS_HEAD(&wq->free_list, 0);
	INIT_LIST_HEAD(&wq->all_list);

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err;

	return wq;
err:
	io_wq_put_hash(data->hash);
	free_cpumask_var(wq->cpu_mask);
	kfree(wq);
	return ERR_PTR(ret);
}
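/*
 * Illustrative sketch (not part of io-wq): minimal setup of an io_wq with 4
 * bounded workers. example_free_work()/example_do_work() and the hash object
 * handling are hypothetical placeholders; the real callers live in io_uring's
 * per-task context setup.
 */
#if 0
static struct io_wq *example_create_wq(struct io_wq_hash *hash)
{
	struct io_wq_data data = {
		.hash		= hash,			/* shared, refcounted hash map */
		.task		= current,		/* task the workers are created for */
		.free_work	= example_free_work,	/* hypothetical: frees item, returns linked work */
		.do_work	= example_do_work,	/* hypothetical: executes one work item */
	};

	return io_wq_create(4, &data);			/* 4 = max bounded workers */
}
#endif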
static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}

static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
		/*
		 * Only the worker continuation helper has worker allocated and
		 * hence needs freeing.
		 */
		if (cb->func == create_worker_cont)
			kfree(worker);
	}
}

static void io_wq_exit_workers(struct io_wq *wq)
{
	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	spin_lock_irq(&wq->hash->wait.lock);
	list_del_init(&wq->wait.entry);
	spin_unlock_irq(&wq->hash->wait.lock);

	put_task_struct(wq->task);
	wq->task = NULL;
}

static void io_wq_destroy(struct io_wq *wq)
{
	struct io_cb_cancel_data match = {
		.fn		= io_wq_work_match_all,
		.cancel_all	= true,
	};

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	io_wq_cancel_pending_work(wq, &match);
	free_cpumask_var(wq->cpu_mask);
	io_wq_put_hash(wq->hash);
	kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}
struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
	return false;
}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}
int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
{
	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	rcu_read_lock();
	if (mask)
		cpumask_copy(tctx->io_wq->cpu_mask, mask);
	else
		cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
	rcu_read_unlock();

	return 0;
}
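/*
 * Illustrative sketch (not part of io-wq): restricting a task's io-wq workers
 * to CPUs 0-1. tctx would be the submitting task's io_uring context; passing
 * a NULL mask resets the workers back to cpu_possible_mask, as the function
 * above shows.
 */
#if 0
static int example_pin_workers(struct io_uring_task *tctx)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);
	ret = io_wq_cpu_affinity(tctx, mask);
	free_cpumask_var(mask);
	return ret;
}
#endif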
/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	struct io_wq_acct *acct;
	int prev[IO_WQ_ACCT_NR];
	int i;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();

	raw_spin_lock(&wq->lock);
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		acct = &wq->acct[i];
		prev[i] = max_t(int, acct->max_workers, prev[i]);
		if (new_count[i])
			acct->max_workers = new_count[i];
	}
	raw_spin_unlock(&wq->lock);
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}
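/*
 * Illustrative sketch (not part of io-wq): because zero entries leave the
 * current limit untouched and the old limits are copied back into new_count,
 * a caller can read the existing bounded/unbounded worker caps without
 * changing them by passing an all-zero array.
 */
#if 0
static void example_read_worker_limits(struct io_wq *wq)
{
	int counts[IO_WQ_ACCT_NR] = { 0, 0 };

	io_wq_max_workers(wq, counts);
	/* counts[IO_WQ_BOUND] and counts[IO_WQ_UNBOUND] now hold the old caps */
}
#endif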
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;

	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);