// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
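
/*
 * Note: order 6 (64 buckets) on 64-bit and order 5 (32 buckets) on 32-bit
 * means the bucket count equals BITS_PER_LONG, so the set of in-flight
 * hashes (wq->hash->map) fits in a single unsigned long bitmap.
 */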
struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	raw_spinlock_t lock;
	struct io_wqe_acct acct[IO_WQ_ACCT_NR];

	int node;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};

/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wqe *wqes[];
};
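
/*
 * wqes[] above is a flexible array member with one slot per NUMA node;
 * io_wq_create() sizes the allocation with struct_size(wq, wqes, nr_node_ids).
 */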
static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
static void io_wqe_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
{
	return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
}
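
/*
 * Two accounting classes exist per wqe: IO_WQ_ACCT_BOUND for work that is
 * CPU or disk bound (worker count capped by the 'bounded' argument to
 * io_wq_create()), and IO_WQ_ACCT_UNBOUND for work that may block for long
 * periods (capped by RLIMIT_NPROC). A work item is routed by its
 * IO_WQ_WORK_UNBOUND flag, a worker by its IO_WORKER_F_BOUND flag.
 */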
static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}

static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&worker->wqe->lock);
	acct->nr_workers--;
	raw_spin_unlock(&worker->wqe->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}

static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wqe->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wqe->lock);
	io_wqe_dec_running(worker);
	worker->flags = 0;
	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	preempt_enable();

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wqe->wq);
	do_exit(0);
}

static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
{
	bool ret = false;

	raw_spin_lock(&acct->lock);
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		ret = true;
	raw_spin_unlock(&acct->lock);

	return ret;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
					struct io_wqe_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wqe_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wqe->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wqe->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	return create_io_worker(wqe->wq, wqe, acct->index);
}

static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;
	struct io_wqe *wqe;
	struct io_wqe_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wqe = worker->wqe;
	wq = wqe->wq;
	acct = &wqe->acct[worker->create_index];
	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wqe->lock);
	if (do_create) {
		create_io_worker(wq, wqe, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wqe_acct *acct,
				   task_work_func_t func)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}

static void io_wqe_dec_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wqe->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wqe->lock);
	}
}

/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
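
/*
 * The hash key rides in the upper bits of work->flags (see IO_WQ_HASH_SHIFT
 * in io-wq.h), so a single word carries both the flag bits and the bucket
 * index set by io_wq_hash_work() below.
 */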
static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}
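
/*
 * Example: with a work list A1 A2 B1 (A and B being two hash keys) and
 * hash_tail[A] == A2, a worker that wins test_and_set_bit(A) splices the
 * whole A1..A2 run out with wq_list_cut() and processes it as one chain,
 * while B1 stays available to other workers. If both A and B are already
 * running elsewhere, the acct is marked stalled and we park on the hash
 * waitqueue until a bucket is cleared.
 */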
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wqe *wqe = worker->wqe;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wqe->wq->hash->wait))
				wake_up(&wqe->wq->hash->wait);
		}
	}

	return NULL;
}
static bool io_flush_signals(void)
{
	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}
	return false;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_flush_signals();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		raw_spin_lock(&acct->lock);
		work = io_get_next_work(acct, worker);
		raw_spin_unlock(&acct->lock);
		if (work) {
			__io_worker_busy(wqe, worker);

			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		} else {
			break;
		}
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);
	} while (1);
}
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	audit_alloc_kernel(current);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
		while (io_acct_run_queue(acct))
			io_worker_handle_work(worker);

		raw_spin_lock(&wqe->lock);
		/* timed out, exit unless we're the last worker */
		if (last_timeout && acct->nr_workers > 1) {
			acct->nr_workers--;
			raw_spin_unlock(&wqe->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wqe, worker);
		raw_spin_unlock(&wqe->lock);
		if (io_flush_signals())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		last_timeout = !ret;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		io_worker_handle_work(worker);

	audit_free(current);
	io_worker_exit(worker);
	return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wqe_dec_running(worker);
}
static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wqe->lock);
	wake_up_new_task(tsk);
}
static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}

static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wqe *wqe;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wqe = worker->wqe;
	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wqe_acct *acct = io_wqe_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wqe->lock);
			while (io_acct_cancel_pending_work(wqe, acct, &match))
				;
		} else {
			raw_spin_unlock(&wqe->lock);
		}
		io_worker_ref_put(wqe->wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}
static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wqe->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wqe = wqe;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}
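
/*
 * Keeping all items of one hash adjacent (each new item appended right
 * after the current hash_tail) is what lets io_get_next_work() treat
 * [work, tail] as a single contiguous run that wq_list_cut() can splice
 * out of the list in O(1).
 */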
static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wqe);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wqe_insert_work(wqe, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	raw_spin_lock(&wqe->lock);
	rcu_read_lock();
	do_create = !io_wqe_activate_free_worker(wqe, acct);
	rcu_read_unlock();

	raw_spin_unlock(&wqe->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wqe_create_worker(wqe, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wqe->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wqe->lock);
			return;
		}
		raw_spin_unlock(&wqe->lock);

		/* fatal condition, failed to create the first worker */
		match.fn		= io_wq_work_match_item;
		match.data		= work;
		match.cancel_all	= false;

		io_acct_cancel_pending_work(wqe, acct, &match);
	}
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
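
/*
 * Illustrative only (not part of this file): a caller that wants writes to
 * the same file serialized could hash on the inode before queueing, e.g.:
 *
 *	if (req->flags & REQ_F_ISREG)
 *		io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 *
 * which is roughly how io_uring's request setup uses this helper.
 */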
static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}
static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);

		if (io_acct_cancel_pending_work(wqe, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the wqe->lock, to ensure that
	 * we'll find a work item regardless of state.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;

		raw_spin_lock(&wqe->lock);
		io_wqe_cancel_running_work(wqe, &match);
		raw_spin_unlock(&wqe->lock);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = &wqe->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wqe_activate_free_worker(wqe, acct);
	}
	rcu_read_unlock();
	return 1;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, node, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
			goto err;
		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wait.func = io_wqe_hash_wake;
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			struct io_wqe_acct *acct = &wqe->acct[i];

			acct->index = i;
			atomic_set(&acct->nr_running, 0);
			INIT_WQ_LIST(&acct->work_list);
			raw_spin_lock_init(&acct->lock);
		}
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node) {
		if (!wq->wqes[node])
			continue;
		free_cpumask_var(wq->wqes[node]->cpu_mask);
		kfree(wq->wqes[node]);
	}
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
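
/*
 * Sketch of the expected setup, illustrative only and assuming the caller
 * already holds a refcounted io_wq_hash (roughly what io_uring's
 * io_init_wq_offload() does):
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,
 *		.task		= task,
 *		.free_work	= io_wq_free_work,
 *		.do_work	= io_wq_submit_work,
 *	};
 *	struct io_wq *wq = io_wq_create(2 * num_online_cpus(), &data);
 *
 * On failure an ERR_PTR() is returned, so check the result with IS_ERR().
 */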
static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wqe->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}

static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
	}
}
static void io_wq_exit_workers(struct io_wq *wq)
{
	int node;

	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
	}
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	for_each_node(node) {
		spin_lock_irq(&wq->hash->wait.lock);
		list_del_init(&wq->wqes[node]->wait.entry);
		spin_unlock_irq(&wq->hash->wait.lock);
	}
	put_task_struct(wq->task);
	wq->task = NULL;
}
static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_cb_cancel_data match = {
			.fn		= io_wq_work_match_all,
			.cancel_all	= true,
		};
		io_wqe_cancel_pending_work(wqe, &match);
		free_cpumask_var(wqe->cpu_mask);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}
{
1310 static bool io_wq_worker_affinity(struct io_worker
*worker
, void *data
)
1312 struct online_data
*od
= data
;
1315 cpumask_set_cpu(od
->cpu
, worker
->wqe
->cpu_mask
);
1317 cpumask_clear_cpu(od
->cpu
, worker
->wqe
->cpu_mask
);
1321 static int __io_wq_cpu_online(struct io_wq
*wq
, unsigned int cpu
, bool online
)
1323 struct online_data od
= {
1331 io_wq_for_each_worker(wq
->wqes
[i
], io_wq_worker_affinity
, &od
);
1336 static int io_wq_cpu_online(unsigned int cpu
, struct hlist_node
*node
)
1338 struct io_wq
*wq
= hlist_entry_safe(node
, struct io_wq
, cpuhp_node
);
1340 return __io_wq_cpu_online(wq
, cpu
, true);
1343 static int io_wq_cpu_offline(unsigned int cpu
, struct hlist_node
*node
)
1345 struct io_wq
*wq
= hlist_entry_safe(node
, struct io_wq
, cpuhp_node
);
1347 return __io_wq_cpu_online(wq
, cpu
, false);
1350 int io_wq_cpu_affinity(struct io_wq
*wq
, cpumask_var_t mask
)
1356 struct io_wqe
*wqe
= wq
->wqes
[i
];
1359 cpumask_copy(wqe
->cpu_mask
, mask
);
1361 cpumask_copy(wqe
->cpu_mask
, cpumask_of_node(i
));
/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	int prev[IO_WQ_ACCT_NR];
	bool first_node = true;
	int i, node;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_wqe_acct *acct;

		raw_spin_lock(&wqe->lock);
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			acct = &wqe->acct[i];
			if (first_node)
				prev[i] = max_t(int, acct->max_workers, prev[i]);
			if (new_count[i])
				acct->max_workers = new_count[i];
		}
		raw_spin_unlock(&wqe->lock);
		first_node = false;
	}
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}
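
/*
 * Example: new_count = { 8, 0 } caps bound workers at 8, leaves the unbound
 * cap untouched, and on return new_count holds the previous
 * { bound, unbound } maximums. This is the helper behind the
 * IORING_REGISTER_IOWQ_MAX_WORKERS registration opcode.
 */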
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;

	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);