// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <linux/mmu_context.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)
enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};
/*
 * One for each thread in a wq pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wq *wq;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
struct io_wq_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};
/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wq_acct acct[IO_WQ_ACCT_NR];

	/* lock protects access to elements below */
	raw_spinlock_t lock;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};

static enum cpuhp_state io_wq_online;
struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, int index);
static void io_wq_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}
static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
	return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
						  struct io_wq_work *work)
{
	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND);
}

static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&wq->lock);
	acct->nr_workers--;
	raw_spin_unlock(&wq->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}
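
/*
 * Tear down an exiting worker: cancel any still-pending create_worker_cb
 * task_work that references it, drop our reference and wait for any
 * remaining ones, unlink it from the free and all lists, and finally
 * free the worker via RCU once readers are done with it.
 */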
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wq *wq = worker->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wq->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wq->lock);
	io_wq_dec_running(worker);
	worker->flags = 0;
	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	preempt_enable();

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wq);
	do_exit(0);
}
static inline bool io_acct_run_queue(struct io_wq_acct *acct)
{
	bool ret = false;

	raw_spin_lock(&acct->lock);
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		ret = true;
	raw_spin_unlock(&acct->lock);

	return ret;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wq_activate_free_worker(struct io_wq *wq,
					struct io_wq_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wq_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wq->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wq->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wq->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	return create_io_worker(wq, acct->index);
}
static void io_wq_inc_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	atomic_inc(&acct->nr_running);
}
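
/*
 * task_work callback, run in the context of the wq owner task: commits a
 * worker slot under wq->lock and creates the worker, or rolls the
 * accounting back if the max worker limit was reached in the meantime.
 */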
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;

	struct io_wq_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wq = worker->wq;
	acct = &wq->acct[worker->create_index];
	raw_spin_lock(&wq->lock);

	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wq->lock);
	if (do_create) {
		create_io_worker(wq, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
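
/*
 * Worker creation is punted to task_work on the wq owner task, since a
 * new io worker thread has to be forked off that task. The create_state
 * bit serves as a one-entry-per-worker guard, so at most one creation
 * request per worker is ever in flight.
 */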
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wq_acct *acct,
				   task_work_func_t func)
{
	struct io_wq *wq = worker->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}
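
/*
 * Called when a worker stops being runnable. If it was the last running
 * worker for this acct and work is still queued, grab references and punt
 * creation of a replacement worker via create_worker_cb.
 */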
static void io_wq_dec_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wq->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wq->lock);
	}
}
/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
	__must_hold(wq->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	}
}
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
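
/*
 * Hook this wq onto the hash waitqueue so we're woken when the hash
 * bucket we stalled on is cleared. Returns true if the bucket already
 * became free while taking the lock, in which case the caller need not
 * sleep.
 */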
static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
{
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wq->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wq->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wq->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}
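
/*
 * Pick the next runnable work item for this worker. Unhashed work is
 * always eligible; hashed work runs only if no other worker currently
 * holds the same hash bucket. If everything left is hashed and busy,
 * mark the acct stalled and wait for a bucket to clear.
 */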
static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wq *wq = worker->wq;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wq->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wq->hash->map)) {
			wq->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wq, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wq->hash->wait))
				wake_up(&wq->hash->wait);
		}
	}

	return NULL;
}
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_run_task_work();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}
static void io_worker_handle_work(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		raw_spin_lock(&acct->lock);
		work = io_get_next_work(acct, worker);
		raw_spin_unlock(&acct->lock);
		if (work) {
			__io_worker_busy(wq, worker);

			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		} else {
			break;
		}
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wq_enqueue(wq, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);
	} while (1);
}
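
/*
 * Main loop for a worker thread: run queued work until the queue drains,
 * then idle on the free list with a timeout. A timed-out worker exits if
 * it isn't the last one for its acct, or if its allowed CPUs changed.
 */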
static int io_wq_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool exit_mask = false, last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
		while (io_acct_run_queue(acct))
			io_worker_handle_work(worker);

		raw_spin_lock(&wq->lock);
		/*
		 * Last sleep timed out. Exit if we're not the last worker,
		 * or if someone modified our affinity.
		 */
		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
			acct->nr_workers--;
			raw_spin_unlock(&wq->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wq, worker);
		raw_spin_unlock(&wq->lock);
		if (io_run_task_work())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		if (!ret) {
			last_timeout = true;
			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
							wq->cpu_mask);
		}
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		io_worker_handle_work(worker);

	io_worker_exit(worker);
	return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wq_inc_running(worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wq_dec_running(worker);
}
static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wq->cpu_mask);

	raw_spin_lock(&wq->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	list_add_tail_rcu(&worker->all_list, &wq->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wq->lock);
	wake_up_new_task(tsk);
}
static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}
static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}
static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wq *wq;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wq = worker->wq;
	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wq_acct *acct = io_wq_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wq->lock);
			while (io_acct_cancel_pending_work(wq, acct, &match))
				;
		} else {
			raw_spin_unlock(&wq->lock);
		}
		io_worker_ref_put(wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}
static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}
static bool create_io_worker(struct io_wq *wq, int index)
{
	struct io_wq_acct *acct = &wq->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wq->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wq = wq;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wq *wq,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}
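
/*
 * Run a work item (and the whole dependent link behind it) through the
 * handler with IO_WQ_WORK_CANCEL set, so each request completes with a
 * cancellation result rather than executing normally.
 */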
static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}
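
/*
 * Queue work for the acct. Unhashed work is appended to the tail; hashed
 * work is chained behind the current tail for its hash bucket, preserving
 * the [work, tail] grouping that io_get_next_work() relies on.
 */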
static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wq->hash_tail[hash];
	wq->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}
static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}
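
/*
 * Enqueue work for async execution. If no free worker could be activated,
 * a new one is created for concurrent work, or when nothing is currently
 * running for this acct. Failure to create the very first worker is fatal
 * for the item, which is then cancelled instead.
 */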
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wq);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wq_insert_work(wq, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	raw_spin_lock(&wq->lock);
	rcu_read_lock();
	do_create = !io_wq_activate_free_worker(wq, acct);
	rcu_read_unlock();

	raw_spin_unlock(&wq->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wq_create_worker(wq, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wq->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wq->lock);
			return;
		}
		raw_spin_unlock(&wq->lock);

		/* fatal condition, failed to create the first worker */
		match.fn		= io_wq_work_match_item,
		match.data		= work,
		match.cancel_all	= false,

		io_acct_cancel_pending_work(wq, acct, &match);
	}
}
/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}
static inline void io_wq_remove_pending(struct io_wq *wq,
					struct io_wq_work *work,
					struct io_wq_work_node *prev)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wq->hash_tail[hash] = prev_work;
		else
			wq->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wq_remove_pending(wq, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wq);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}
static void io_wq_cancel_pending_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = io_get_acct(wq, i == 0);

		if (io_acct_cancel_pending_work(wq, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}
static void io_wq_cancel_running_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the wq->lock, to ensure that
	 * we'll find a work item regardless of state.
	 */
	io_wq_cancel_pending_work(wq, &match);
	if (match.nr_pending && !match.cancel_all)
		return IO_WQ_CANCEL_OK;

	raw_spin_lock(&wq->lock);
	io_wq_cancel_running_work(wq, &match);
	raw_spin_unlock(&wq->lock);
	if (match.nr_running && !match.cancel_all)
		return IO_WQ_CANCEL_RUNNING;

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			   int sync, void *key)
{
	struct io_wq *wq = container_of(wait, struct io_wq, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wq_activate_free_worker(wq, acct);
	}
	rcu_read_unlock();
	return 1;
}
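
/*
 * Allocate and set up an io_wq: register for CPU hotplug notifications,
 * take a reference on the shared hash, and size the bounded acct from the
 * caller and the unbounded acct from RLIMIT_NPROC.
 */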
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;

	if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
		goto err;
	cpumask_copy(wq->cpu_mask, cpu_possible_mask);
	wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
	wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
				task_rlimit(current, RLIMIT_NPROC);
	INIT_LIST_HEAD(&wq->wait.entry);
	wq->wait.func = io_wq_hash_wake;
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		acct->index = i;
		atomic_set(&acct->nr_running, 0);
		INIT_WQ_LIST(&acct->work_list);
		raw_spin_lock_init(&acct->lock);
	}

	raw_spin_lock_init(&wq->lock);
	INIT_HLIST_NULLS_HEAD(&wq->free_list, 0);
	INIT_LIST_HEAD(&wq->all_list);

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	free_cpumask_var(wq->cpu_mask);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wq == data;
}
void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}
static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
		/*
		 * Only the worker continuation helper has worker allocated and
		 * hence needs freeing.
		 */
		if (cb->func == create_worker_cont)
			kfree(worker);
	}
}
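
/*
 * Shut down all workers: cancel any pending create task_work, wake every
 * worker so it observes IO_WQ_BIT_EXIT, then wait for the last worker
 * reference to be dropped before detaching from the hash waitqueue.
 */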
static void io_wq_exit_workers(struct io_wq *wq)
{
	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	spin_lock_irq(&wq->hash->wait.lock);
	list_del_init(&wq->wait.entry);
	spin_unlock_irq(&wq->hash->wait.lock);

	put_task_struct(wq->task);
	wq->task = NULL;
}
static void io_wq_destroy(struct io_wq *wq)
{
	struct io_cb_cancel_data match = {
		.fn		= io_wq_work_match_all,
		.cancel_all	= true,
	};

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	io_wq_cancel_pending_work(wq, &match);
	free_cpumask_var(wq->cpu_mask);
	io_wq_put_hash(wq->hash);
	kfree(wq);
}
void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}
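
/*
 * CPU hotplug callbacks: propagate CPUs coming and going into wq->cpu_mask
 * so worker affinity follows the online CPU set.
 */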
struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
	return false;
}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	rcu_read_lock();
	if (mask)
		cpumask_copy(wq->cpu_mask, mask);
	else
		cpumask_copy(wq->cpu_mask, cpu_possible_mask);
	rcu_read_unlock();

	return 0;
}
/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	struct io_wq_acct *acct;
	int prev[IO_WQ_ACCT_NR];
	int i;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();

	raw_spin_lock(&wq->lock);
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		acct = &wq->acct[i];
		prev[i] = max_t(int, acct->max_workers, prev[i]);
		if (new_count[i])
			acct->max_workers = new_count[i];
	}
	raw_spin_unlock(&wq->lock);
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);