// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <linux/mmu_context.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};

/*
 * One for each thread in a wq pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wq *wq;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wq_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wq_acct acct[IO_WQ_ACCT_NR];

	/* lock protects access to elements below */
	raw_spinlock_t lock;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, int index);
static void io_wq_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);

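/*
 * Take and drop a reference on a worker. The final reference drop completes
 * ->ref_done so io_worker_exit() can tear the worker down.
 */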
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
	return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
						  struct io_wq_work *work)
{
	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND);
}

static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}

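/*
 * Undo the accounting taken when a worker creation was queued: the worker
 * never materialized, so drop the running/worker counts and the references.
 */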
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&wq->lock);
	acct->nr_workers--;
	raw_spin_unlock(&wq->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}

static void io_worker_exit(struct io_worker *worker)
{
	struct io_wq *wq = worker->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
				io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wq->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wq->lock);
	io_wq_dec_running(worker);
	worker->flags = 0;
	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	preempt_enable();

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wq);
	do_exit(0);
}

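/*
 * Return true if this accounting class has pending work that isn't
 * currently stalled on a hashed item.
 */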
static inline bool io_acct_run_queue(struct io_wq_acct *acct)
{
	bool ret = false;

	raw_spin_lock(&acct->lock);
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		ret = true;
	raw_spin_unlock(&acct->lock);

	return ret;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wq_activate_free_worker(struct io_wq *wq,
					struct io_wq_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wq_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wq->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wq->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wq->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	return create_io_worker(wq, acct->index);
}

static void io_wq_inc_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

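/*
 * task_work callback: create a new worker for the accounting class recorded
 * in ->create_index, provided we're still under the max worker limit.
 */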
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;

	struct io_wq_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wq = worker->wq;
	acct = &wq->acct[worker->create_index];
	raw_spin_lock(&wq->lock);

	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wq->lock);
	if (do_create) {
		create_io_worker(wq, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

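/*
 * Queue task_work on the wq owner task to create a new worker. Returns
 * false if the request could not be queued, in which case the caller's
 * nr_running and worker reference are dropped here.
 */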
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wq_acct *acct,
				   task_work_func_t func)
{
	struct io_wq *wq = worker->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}

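/*
 * A running worker is going to sleep. If it was the last running worker
 * for this accounting class and work is still pending, queue creation of
 * a new worker so the queue keeps making progress.
 */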
static void io_wq_dec_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wq->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wq->lock);
	}
}

/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
	__must_hold(wq->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

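/*
 * Hook the wq onto the hash wait queue so it's woken when the hash bit
 * clears. Returns true if the bit cleared in the meantime, i.e. the
 * caller can retry immediately instead of sleeping.
 */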
static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
{
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wq->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wq->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wq->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}

static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wq *wq = worker->wq;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wq->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wq->hash->map)) {
			wq->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wq, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wq->hash->wait))
				wake_up(&wq->hash->wait);
		}
	}

	return NULL;
}

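/*
 * Publish the work item this worker is about to run (or NULL when done),
 * so cancelation can find it via ->cur_work.
 */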
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_run_task_work();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}

static void io_worker_handle_work(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		raw_spin_lock(&acct->lock);
		work = io_get_next_work(acct, worker);
		raw_spin_unlock(&acct->lock);
		if (work) {
			__io_worker_busy(wq, worker);

			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		} else {
			break;
		}
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wq_enqueue(wq, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);
	} while (1);
}

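/*
 * Main loop for an io-wq worker thread: run pending work, idle with a
 * timeout, and exit when the wq is torn down or the worker times out
 * while other workers remain.
 */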
static int io_wq_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool exit_mask = false, last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
		while (io_acct_run_queue(acct))
			io_worker_handle_work(worker);

		raw_spin_lock(&wq->lock);
		/*
		 * Last sleep timed out. Exit if we're not the last worker,
		 * or if someone modified our affinity.
		 */
		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
			acct->nr_workers--;
			raw_spin_unlock(&wq->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wq, worker);
		raw_spin_unlock(&wq->lock);
		if (io_run_task_work())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		if (!ret) {
			last_timeout = true;
			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
							wq->cpu_mask);
		}
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		io_worker_handle_work(worker);

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wq_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wq_dec_running(worker);
}

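/*
 * Attach a freshly created task to its io_worker, put the worker on the
 * free list, and kick off the new task.
 */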
static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wq->cpu_mask);

	raw_spin_lock(&wq->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	list_add_tail_rcu(&worker->all_list, &wq->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wq->lock);
	wake_up_new_task(tsk);
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}

static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wq *wq;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wq = worker->wq;
	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wq_acct *acct = io_wq_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wq->lock);
			while (io_acct_cancel_pending_work(wq, acct, &match))
				;
		} else {
			raw_spin_unlock(&wq->lock);
		}
		io_worker_ref_put(wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}

static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}

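/*
 * Allocate and start a new worker for the given accounting index. On a
 * transient thread-creation failure, the attempt is retried from
 * workqueue context.
 */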
static bool create_io_worker(struct io_wq *wq, int index)
{
	struct io_wq_acct *acct = &wq->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wq->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wq = wq;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wq *wq,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

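/*
 * Append work to the accounting class list. Hashed work is chained after
 * the current tail for its hash bucket, so items with the same hash stay
 * contiguous and run serially.
 */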
static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wq->hash_tail[hash];
	wq->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}

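/*
 * Queue a work item on the wq. An idle worker is woken if one exists;
 * otherwise a new worker may be created, and if even the first worker
 * cannot be created the work item is canceled.
 */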
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wq);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wq_insert_work(wq, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	raw_spin_lock(&wq->lock);
	rcu_read_lock();
	do_create = !io_wq_activate_free_worker(wq, acct);
	rcu_read_unlock();

	raw_spin_unlock(&wq->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wq_create_worker(wq, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wq->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wq->lock);
			return;
		}
		raw_spin_unlock(&wq->lock);

		/* fatal condition, failed to create the first worker */
		match.fn		= io_wq_work_match_item,
		match.data		= work,
		match.cancel_all	= false,

		io_acct_cancel_pending_work(wq, acct, &match);
	}
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}

static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wq_remove_pending(struct io_wq *wq,
					struct io_wq_work *work,
					struct io_wq_work_node *prev)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wq->hash_tail[hash] = prev_work;
		else
			wq->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}

static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wq_remove_pending(wq, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wq);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}

static void io_wq_cancel_pending_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = io_get_acct(wq, i == 0);

		if (io_acct_cancel_pending_work(wq, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}

static void io_wq_cancel_running_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the wq->lock, to ensure that
	 * we'll find a work item regardless of state.
	 */
	io_wq_cancel_pending_work(wq, &match);
	if (match.nr_pending && !match.cancel_all)
		return IO_WQ_CANCEL_OK;

	raw_spin_lock(&wq->lock);
	io_wq_cancel_running_work(wq, &match);
	raw_spin_unlock(&wq->lock);
	if (match.nr_running && !match.cancel_all)
		return IO_WQ_CANCEL_RUNNING;

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			   int sync, void *key)
{
	struct io_wq *wq = container_of(wait, struct io_wq, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wq_activate_free_worker(wq, acct);
	}
	rcu_read_unlock();
	return 1;
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;

	if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
		goto err;
	cpumask_copy(wq->cpu_mask, cpu_possible_mask);
	wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
	wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
				task_rlimit(current, RLIMIT_NPROC);
	INIT_LIST_HEAD(&wq->wait.entry);
	wq->wait.func = io_wq_hash_wake;
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		acct->index = i;
		atomic_set(&acct->nr_running, 0);
		INIT_WQ_LIST(&acct->work_list);
		raw_spin_lock_init(&acct->lock);
	}

	raw_spin_lock_init(&wq->lock);
	INIT_HLIST_NULLS_HEAD(&wq->free_list, 0);
	INIT_LIST_HEAD(&wq->all_list);

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	free_cpumask_var(wq->cpu_mask);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}

static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}

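/*
 * Cancel any still-pending worker-creation task_work queued on the wq
 * owner task, undoing the accounting each entry had taken.
 */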
static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
		/*
		 * Only the worker continuation helper has worker allocated and
		 * hence needs freeing.
		 */
		if (cb->func == create_worker_cont)
			kfree(worker);
	}
}

static void io_wq_exit_workers(struct io_wq *wq)
{
	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	spin_lock_irq(&wq->hash->wait.lock);
	list_del_init(&wq->wait.entry);
	spin_unlock_irq(&wq->hash->wait.lock);

	put_task_struct(wq->task);
	wq->task = NULL;
}

static void io_wq_destroy(struct io_wq *wq)
{
	struct io_cb_cancel_data match = {
		.fn		= io_wq_work_match_all,
		.cancel_all	= true,
	};

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	io_wq_cancel_pending_work(wq, &match);
	free_cpumask_var(wq->cpu_mask);
	io_wq_put_hash(wq->hash);
	kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}

struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
	return false;
}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	rcu_read_lock();
	if (mask)
		cpumask_copy(wq->cpu_mask, mask);
	else
		cpumask_copy(wq->cpu_mask, cpu_possible_mask);
	rcu_read_unlock();

	return 0;
}

/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	struct io_wq_acct *acct;
	int prev[IO_WQ_ACCT_NR];
	int i;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();

	raw_spin_lock(&wq->lock);
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		acct = &wq->acct[i];
		prev[i] = max_t(int, acct->max_workers, prev[i]);
		if (new_count[i])
			acct->max_workers = new_count[i];
	}
	raw_spin_unlock(&wq->lock);
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);