// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	raw_spinlock_t lock;
	struct io_wqe_acct acct[IO_WQ_ACCT_NR];

	int node;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};

/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wqe *wqes[];
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
static void io_wqe_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
{
	return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
}

static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}

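/*
 * Undo the accounting taken when a create task_work was queued but ends up
 * canceled before running: drop nr_running and nr_workers, put the wq
 * worker reference, and release the create_state bit and worker ref.
 */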
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&worker->wqe->lock);
	acct->nr_workers--;
	raw_spin_unlock(&worker->wqe->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}

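/*
 * Worker teardown: flush any create task_work still referencing this
 * worker, drop our own reference and wait for the remaining ones to go
 * away, then unhook from the wqe lists and exit the thread.
 */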
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
				io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wqe->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wqe->lock);
	io_wqe_dec_running(worker);
	worker->flags = 0;
	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	preempt_enable();

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wqe->wq);
	do_exit(0);
}

static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
{
	bool ret = false;

	raw_spin_lock(&acct->lock);
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		ret = true;
	raw_spin_unlock(&acct->lock);

	return ret;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
					struct io_wqe_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wqe_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wqe->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wqe->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	return create_io_worker(wqe->wq, wqe, acct->index);
}

static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

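/*
 * task_work callback, run in the context of the task that owns the ring:
 * re-check the worker limit under the lock and either fork the new worker
 * or, if we lost the race, undo the accounting taken at queue time.
 */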
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;
	struct io_wqe *wqe;
	struct io_wqe_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wqe = worker->wqe;
	wq = wqe->wq;
	acct = &wqe->acct[worker->create_index];
	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wqe->lock);
	if (do_create) {
		create_io_worker(wq, wqe, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wqe_acct *acct,
				   task_work_func_t func)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}

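/*
 * Called when a worker stops being runnable. If this was the last running
 * worker for the accounting class and work is still queued, arrange for a
 * replacement worker to be created from task_work context.
 */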
static void io_wqe_dec_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wqe->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wqe->lock);
	}
}

/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

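/*
 * Stalled on hashed work: register this wqe on the hash waitqueue so it is
 * woken when the contended hash bit clears. Returns true if the bit was
 * already clear while arming the wait, meaning the caller can retry now.
 */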
static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}

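/*
 * Pick the next runnable item off the acct list. Unhashed work can run
 * right away; hashed work runs only if its hash bit isn't already claimed
 * by another worker, otherwise the whole [work, tail] chain for that hash
 * is skipped. If only contended hashed work remains, mark the acct stalled
 * and wait on the first blocking hash.
 */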
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wqe *wqe = worker->wqe;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wqe->wq->hash->wait))
				wake_up(&wqe->wq->hash->wait);
		}
	}

	return NULL;
}

static bool io_flush_signals(void)
{
	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}
	return false;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_flush_signals();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

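/*
 * Main work loop: repeatedly pull runnable work and execute it, including
 * any chain of dependent (linked) items that free_work() hands back after
 * each completion.
 */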
771b53d0 549static void io_worker_handle_work(struct io_worker *worker)
771b53d0 550{
f95dc207 551 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
771b53d0
JA
552 struct io_wqe *wqe = worker->wqe;
553 struct io_wq *wq = wqe->wq;
c60eb049 554 bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
771b53d0
JA
555
556 do {
86f3cd1b 557 struct io_wq_work *work;
73031f76 558
771b53d0
JA
559 /*
560 * If we got some work, mark us as busy. If we didn't, but
561 * the list isn't empty, it means we stalled on hashed work.
562 * Mark us stalled so we don't keep looking for work when we
563 * can't make progress, any work completion or insertion will
564 * clear the stalled flag.
565 */
e13fb1fe 566 raw_spin_lock(&acct->lock);
f95dc207 567 work = io_get_next_work(acct, worker);
42abc95f 568 raw_spin_unlock(&acct->lock);
361aee45 569 if (work) {
ea6e7cee 570 __io_worker_busy(wqe, worker);
771b53d0 571
361aee45
JA
572 /*
573 * Make sure cancelation can find this, even before
574 * it becomes the active work. That avoids a window
575 * where the work has been removed from our general
576 * work list, but isn't yet discoverable as the
577 * current work item for this worker.
578 */
579 raw_spin_lock(&worker->lock);
580 worker->next_work = work;
581 raw_spin_unlock(&worker->lock);
42abc95f 582 } else {
771b53d0 583 break;
42abc95f 584 }
58e39319 585 io_assign_current_work(worker, work);
e941894e 586 __set_current_state(TASK_RUNNING);
36c2f922 587
dc026a73
PB
588 /* handle a whole dependent link */
589 do {
5280f7e5 590 struct io_wq_work *next_hashed, *linked;
b089ed39 591 unsigned int hash = io_get_work_hash(work);
dc026a73 592
86f3cd1b 593 next_hashed = wq_next_work(work);
c60eb049
PB
594
595 if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
596 work->flags |= IO_WQ_WORK_CANCEL;
5280f7e5
PB
597 wq->do_work(work);
598 io_assign_current_work(worker, NULL);
dc026a73 599
5280f7e5 600 linked = wq->free_work(work);
86f3cd1b
PB
601 work = next_hashed;
602 if (!work && linked && !io_wq_is_hashed(linked)) {
603 work = linked;
604 linked = NULL;
605 }
606 io_assign_current_work(worker, work);
86f3cd1b
PB
607 if (linked)
608 io_wqe_enqueue(wqe, linked);
609
610 if (hash != -1U && !next_hashed) {
d3e3c102
JA
611 /* serialize hash clear with wake_up() */
612 spin_lock_irq(&wq->hash->wait.lock);
e941894e 613 clear_bit(hash, &wq->hash->map);
f95dc207 614 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
d3e3c102 615 spin_unlock_irq(&wq->hash->wait.lock);
e941894e
JA
616 if (wq_has_sleeper(&wq->hash->wait))
617 wake_up(&wq->hash->wait);
7d723065 618 }
58e39319 619 } while (work);
771b53d0
JA
620 } while (1);
621}
622
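/*
 * Thread function for an io-wq worker. Runs queued work for its accounting
 * class, then idles on the free list; an idle timeout exits the worker
 * unless it is the last one left for the class.
 */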
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	audit_alloc_kernel(current);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
		while (io_acct_run_queue(acct))
			io_worker_handle_work(worker);

		raw_spin_lock(&wqe->lock);
		/* timed out, exit unless we're the last worker */
		if (last_timeout && acct->nr_workers > 1) {
			acct->nr_workers--;
			raw_spin_unlock(&wqe->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wqe, worker);
		raw_spin_unlock(&wqe->lock);
		if (io_flush_signals())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		last_timeout = !ret;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		io_worker_handle_work(worker);

	audit_free(current);
	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wqe_dec_running(worker);
}

static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wqe->lock);
	wake_up_new_task(tsk);
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}

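/*
 * Retry path for worker creation, run from task_work after an earlier
 * create_io_thread() attempt failed with a transient error. On permanent
 * failure with no workers left, cancel all pending work for this acct so
 * nothing is left stranded on the list.
 */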
static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wqe *wqe;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wqe = worker->wqe;
	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wqe_acct *acct = io_wqe_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wqe->lock);
			while (io_acct_cancel_pending_work(wqe, acct, &match))
				;
		} else {
			raw_spin_unlock(&wqe->lock);
		}
		io_worker_ref_put(wqe->wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}

static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wqe->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wqe = wqe;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

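/*
 * Append work to the acct list. Hashed work is chained behind the current
 * tail for its hash bucket, keeping same-hash items contiguous so that
 * io_get_next_work() can grab or skip the chain as a unit.
 */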
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wqe);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wqe_insert_work(wqe, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	raw_spin_lock(&wqe->lock);
	rcu_read_lock();
	do_create = !io_wqe_activate_free_worker(wqe, acct);
	rcu_read_unlock();

	raw_spin_unlock(&wqe->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wqe_create_worker(wqe, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wqe->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wqe->lock);
			return;
		}
		raw_spin_unlock(&wqe->lock);

		/* fatal condition, failed to create the first worker */
		match.fn = io_wq_work_match_item;
		match.data = work;
		match.cancel_all = false;

		io_acct_cancel_pending_work(wqe, acct, &match);
	}
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}

static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}

static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);

		if (io_acct_cancel_pending_work(wqe, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the wqe->lock, to ensure that
	 * we'll find a work item regardless of state.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;

		raw_spin_lock(&wqe->lock);
		io_wqe_cancel_running_work(wqe, &match);
		raw_spin_unlock(&wqe->lock);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

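/*
 * Hash waitqueue callback: some hash bit was cleared, so accounting
 * classes stalled on hashed work may make progress again. Clear their
 * stalled bits and kick a free worker for each.
 */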
static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = &wqe->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wqe_activate_free_worker(wqe, acct);
	}
	rcu_read_unlock();
	return 1;
}

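/*
 * Set up a new io_wq: one io_wqe per NUMA node, with the bounded worker
 * limit supplied by the caller and the unbounded limit derived from
 * RLIMIT_NPROC.
 */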
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, node, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
			goto err;
		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wait.func = io_wqe_hash_wake;
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			struct io_wqe_acct *acct = &wqe->acct[i];

			acct->index = i;
			atomic_set(&acct->nr_running, 0);
			INIT_WQ_LIST(&acct->work_list);
			raw_spin_lock_init(&acct->lock);
		}
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node) {
		if (!wq->wqes[node])
			continue;
		free_cpumask_var(wq->wqes[node]->cpu_mask);
		kfree(wq->wqes[node]);
	}
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}

static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wqe->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}

static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
	}
}

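/*
 * Wake all workers and wait for them to exit; dropping the initial
 * worker_refs reference allows worker_done to complete once the last
 * worker is gone. Afterwards, unhook the wqes from the hash waitqueue.
 */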
static void io_wq_exit_workers(struct io_wq *wq)
{
	int node;

	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
	}
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	for_each_node(node) {
		spin_lock_irq(&wq->hash->wait.lock);
		list_del_init(&wq->wqes[node]->wait.entry);
		spin_unlock_irq(&wq->hash->wait.lock);
	}
	put_task_struct(wq->task);
	wq->task = NULL;
}

static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_cb_cancel_data match = {
			.fn		= io_wq_work_match_all,
			.cancel_all	= true,
		};
		io_wqe_cancel_pending_work(wqe, &match);
		free_cpumask_var(wqe->cpu_mask);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}

struct online_data {
	unsigned int cpu;
	bool online;
};

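/*
 * CPU hotplug callbacks: as CPUs come and go, add or remove them from
 * every worker's allowed cpumask via the per-wq cpuhp instance registered
 * at creation time.
 */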
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
	return false;
}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	int i;

	rcu_read_lock();
	for_each_node(i) {
		struct io_wqe *wqe = wq->wqes[i];

		if (mask)
			cpumask_copy(wqe->cpu_mask, mask);
		else
			cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
	}
	rcu_read_unlock();
	return 0;
}

/*
 * Set max number of bounded and unbounded workers, returning the old
 * values in new_count. A zero entry in new_count leaves that class
 * unchanged and just reports the previous value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	int prev[IO_WQ_ACCT_NR];
	bool first_node = true;
	int i, node;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_wqe_acct *acct;

		raw_spin_lock(&wqe->lock);
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			acct = &wqe->acct[i];
			if (first_node)
				prev[i] = max_t(int, acct->max_workers, prev[i]);
			if (new_count[i])
				acct->max_workers = new_count[i];
		}
		raw_spin_unlock(&wqe->lock);
		first_node = false;
	}
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);