// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <linux/mmu_context.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)
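/*
 * Note: a worker that sleeps for longer than WORKER_IDLE_TIMEOUT without
 * seeing new work exits on its own (see io_wq_worker()), unless it is the
 * last worker left for its accounting class.
 */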
enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};
/*
 * One for each thread in a wq pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wq *wq;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
struct io_wq_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wq_acct acct[IO_WQ_ACCT_NR];

	/* lock protects access to elements below */
	raw_spinlock_t lock;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};
static enum cpuhp_state io_wq_online;
struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};
static bool create_io_worker(struct io_wq *wq, int index);
static void io_wq_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}
static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
	return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
						  struct io_wq_work *work)
{
	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND);
}
static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&wq->lock);
	acct->nr_workers--;
	raw_spin_unlock(&wq->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wq *wq = worker->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wq->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wq->lock);
	io_wq_dec_running(worker);
	/*
	 * this worker is a goner, clear ->worker_private to avoid any
	 * inc/dec running calls that could happen as part of exit from
	 * touching 'worker'.
	 */
	current->worker_private = NULL;

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wq);
	do_exit(0);
}
static inline bool io_acct_run_queue(struct io_wq_acct *acct)
{
	bool ret = false;

	raw_spin_lock(&acct->lock);
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		ret = true;
	raw_spin_unlock(&acct->lock);

	return ret;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wq_activate_free_worker(struct io_wq *wq,
					struct io_wq_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wq_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wq->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wq->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wq->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	return create_io_worker(wq, acct->index);
}
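/*
 * Note that a true return from io_wq_create_worker() does not guarantee a
 * new thread exists yet: being at the max_workers limit counts as success,
 * and the actual thread creation in create_io_worker() may still be retried
 * asynchronously on transient failure.
 */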
static void io_wq_inc_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	atomic_inc(&acct->nr_running);
}
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;
	struct io_wq_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wq = worker->wq;
	acct = &wq->acct[worker->create_index];
	raw_spin_lock(&wq->lock);

	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wq->lock);
	if (do_create) {
		create_io_worker(wq, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wq_acct *acct,
				   task_work_func_t func)
{
	struct io_wq *wq = worker->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}
static void io_wq_dec_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}
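/*
 * io_wq_dec_running() runs from the scheduler's sleeping hook
 * (io_wq_worker_sleeping()), so it must not block. If this was the last
 * running worker and work is still queued, creation of a replacement is
 * deferred to task_work via io_queue_worker_create() rather than done
 * inline here.
 */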
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wq->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wq->lock);
	}
}
/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
	__must_hold(wq->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	}
}
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
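/*
 * The hash value lives in the upper bits of work->flags, stored there by
 * io_wq_hash_work(); IO_WQ_HASH_SHIFT separates it from the flag bits.
 */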
static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
{
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wq->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wq->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wq->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}
static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wq *wq = worker->wq;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wq->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wq->hash->map)) {
			wq->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wq, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wq->hash->wait))
				wake_up(&wq->hash->wait);
		}
	}

	return NULL;
}
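/*
 * Work items sharing a hash are kept adjacent in the list, between the
 * first such item and wq->hash_tail[hash]. A worker that wins
 * test_and_set_bit() on the hash splices that whole run out with
 * wq_list_cut() and executes it serially, while other workers skip ahead
 * to differently hashed work instead of spinning on the same hash.
 */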
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}
static void io_worker_handle_work(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		raw_spin_lock(&acct->lock);
		work = io_get_next_work(acct, worker);
		raw_spin_unlock(&acct->lock);
		if (work) {
			__io_worker_busy(wq, worker);

			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		} else {
			break;
		}
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wq_enqueue(wq, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);
	} while (1);
}
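/*
 * A hedged sketch of the path a single item takes through the worker above,
 * assuming wq->do_work() and wq->free_work() behave like io_uring's handlers
 * (free_work may hand back a linked follow-up request):
 *
 *	work = io_get_next_work(acct, worker);
 *	wq->do_work(work);		// execute the request
 *	linked = wq->free_work(work);	// release it, possibly get a link
 *	io_wq_enqueue(wq, linked);	// requeue the link unless we run it
 */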
static int io_wq_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool exit_mask = false, last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
		while (io_acct_run_queue(acct))
			io_worker_handle_work(worker);

		raw_spin_lock(&wq->lock);
		/*
		 * Last sleep timed out. Exit if we're not the last worker,
		 * or if someone modified our affinity.
		 */
		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
			acct->nr_workers--;
			raw_spin_unlock(&wq->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wq, worker);
		raw_spin_unlock(&wq->lock);
		if (io_run_task_work())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		if (!ret) {
			last_timeout = true;
			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
							wq->cpu_mask);
		}
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		io_worker_handle_work(worker);

	io_worker_exit(worker);
	return 0;
}
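/*
 * io_wq_worker() is the thread function handed to create_io_thread(). It
 * loops between draining queued work and sleeping on the free list, and only
 * exits on IO_WQ_BIT_EXIT, a fatal signal, or an idle timeout when it is not
 * the last worker (or its CPU affinity no longer matches the wq mask).
 */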
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wq_inc_running(worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wq_dec_running(worker);
}
static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wq->cpu_mask);

	raw_spin_lock(&wq->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	list_add_tail_rcu(&worker->all_list, &wq->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wq->lock);
	wake_up_new_task(tsk);
}
static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}
static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wq *wq;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wq = worker->wq;
	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wq_acct *acct = io_wq_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wq->lock);
			while (io_acct_cancel_pending_work(wq, acct, &match))
				;
		} else {
			raw_spin_unlock(&wq->lock);
		}
		io_worker_ref_put(wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}
static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}
static bool create_io_worker(struct io_wq *wq, int index)
{
	struct io_wq_acct *acct = &wq->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wq->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wq = wq;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wq *wq,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}
static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}
static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wq->hash_tail[hash];
	wq->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}
static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wq);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wq_insert_work(wq, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	raw_spin_lock(&wq->lock);
	rcu_read_lock();
	do_create = !io_wq_activate_free_worker(wq, acct);
	rcu_read_unlock();

	raw_spin_unlock(&wq->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wq_create_worker(wq, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wq->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wq->lock);
			return;
		}
		raw_spin_unlock(&wq->lock);

		/* fatal condition, failed to create the first worker */
		match.fn		= io_wq_work_match_item,
		match.data		= work,
		match.cancel_all	= false,

		io_acct_cancel_pending_work(wq, acct, &match);
	}
}
/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
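/*
 * A minimal usage sketch from a hypothetical submitter: to serialize
 * buffered writes against the same file, hash on the inode before queueing:
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 *
 * io_wq_hash_work() and io_wq_enqueue() are defined in this file; "req" and
 * its fields are purely illustrative.
 */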
static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}
static inline void io_wq_remove_pending(struct io_wq *wq,
					struct io_wq_work *work,
					struct io_wq_work_node *prev)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wq->hash_tail[hash] = prev_work;
		else
			wq->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wq_remove_pending(wq, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wq);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}
static void io_wq_cancel_pending_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = io_get_acct(wq, i == 0);

		if (io_acct_cancel_pending_work(wq, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}
static void io_wq_cancel_running_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the wq->lock, to ensure that
	 * we'll find a work item regardless of state.
	 */
	io_wq_cancel_pending_work(wq, &match);
	if (match.nr_pending && !match.cancel_all)
		return IO_WQ_CANCEL_OK;

	raw_spin_lock(&wq->lock);
	io_wq_cancel_running_work(wq, &match);
	raw_spin_unlock(&wq->lock);
	if (match.nr_running && !match.cancel_all)
		return IO_WQ_CANCEL_RUNNING;

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
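/*
 * Summary of io_wq_cancel_cb() return values: IO_WQ_CANCEL_OK means the work
 * was removed before it ran and no completion will be posted for it;
 * IO_WQ_CANCEL_RUNNING means a running worker was signalled and the
 * completion will still be posted as usual; IO_WQ_CANCEL_NOTFOUND means the
 * work was not found in either state.
 */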
static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			   int sync, void *key)
{
	struct io_wq *wq = container_of(wait, struct io_wq, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wq_activate_free_worker(wq, acct);
	}
	rcu_read_unlock();
	return 1;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;

	if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
		goto err;
	cpumask_copy(wq->cpu_mask, cpu_possible_mask);
	wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
	wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
				task_rlimit(current, RLIMIT_NPROC);
	INIT_LIST_HEAD(&wq->wait.entry);
	wq->wait.func = io_wq_hash_wake;
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		acct->index = i;
		atomic_set(&acct->nr_running, 0);
		INIT_WQ_LIST(&acct->work_list);
		raw_spin_lock_init(&acct->lock);
	}

	raw_spin_lock_init(&wq->lock);
	INIT_HLIST_NULLS_HEAD(&wq->free_list, 0);
	INIT_LIST_HEAD(&wq->all_list);

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	free_cpumask_var(wq->cpu_mask);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wq == data;
}
void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}
static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
		/*
		 * Only the worker continuation helper has worker allocated and
		 * hence needs freeing.
		 */
		if (cb->func == create_worker_cont)
			kfree(worker);
	}
}
static void io_wq_exit_workers(struct io_wq *wq)
{
	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	spin_lock_irq(&wq->hash->wait.lock);
	list_del_init(&wq->wait.entry);
	spin_unlock_irq(&wq->hash->wait.lock);

	put_task_struct(wq->task);
	wq->task = NULL;
}
static void io_wq_destroy(struct io_wq *wq)
{
	struct io_cb_cancel_data match = {
		.fn		= io_wq_work_match_all,
		.cancel_all	= true,
	};

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	io_wq_cancel_pending_work(wq, &match);
	free_cpumask_var(wq->cpu_mask);
	io_wq_put_hash(wq->hash);
	kfree(wq);
}
void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}
struct online_data {
	unsigned int cpu;
	bool online;
};
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
	return false;
}
static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}
static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}
static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	rcu_read_lock();
	if (mask)
		cpumask_copy(wq->cpu_mask, mask);
	else
		cpumask_copy(wq->cpu_mask, cpu_possible_mask);
	rcu_read_unlock();

	return 0;
}
/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	struct io_wq_acct *acct;
	int prev[IO_WQ_ACCT_NR];
	int i;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();

	raw_spin_lock(&wq->lock);
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		acct = &wq->acct[i];
		prev[i] = max_t(int, acct->max_workers, prev[i]);
		if (new_count[i])
			acct->max_workers = new_count[i];
	}
	raw_spin_unlock(&wq->lock);
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);