/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>

void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	/* non-exclusive entries go at the head, ahead of exclusive waiters */
	__add_wait_queue(q, wq_entry);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_entry_tail(q, wq_entry);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wq_entry);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
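
/*
 * Hedged usage sketch (names are illustrative, not from this file):
 * a caller that manages its wait-queue entry by hand typically pairs
 * these helpers with wait_woken() below:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq, &wait);
 *	while (!condition)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq, &wait);
 */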

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_entry_t *curr, *next;

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;

		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);
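
/*
 * Callers normally use the wrapper macros from <linux/wait.h> rather
 * than calling __wake_up() directly; roughly (sketch of the common
 * ones):
 *
 *	wake_up(&q)			__wake_up(&q, TASK_NORMAL, 1, NULL)
 *	wake_up_all(&q)			__wake_up(&q, TASK_NORMAL, 0, NULL)
 *	wake_up_interruptible(&q)	__wake_up(&q, TASK_INTERRUPTIBLE, 1, NULL)
 */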

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
	__wake_up_common(q, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
	__wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - i.e. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!q))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
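
/*
 * As with __wake_up(), callers normally go through a wrapper macro;
 * e.g. (sketch) wake_up_interruptible_sync(&q) expands to
 * __wake_up_sync(&q, TASK_INTERRUPTIBLE, 1).
 */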

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * is guaranteed to see the waitqueue addition, _or_ the
 * subsequent tests in this thread will see the wakeup having
 * taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wq_entry->task_list))
		__add_wait_queue(q, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
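
/*
 * Hedged sketch of the classic open-coded wait loop built on these
 * helpers ('wq' and 'condition' are illustrative):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */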

void
prepare_to_wait_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wq_entry->task_list))
		__add_wait_queue_entry_tail(q, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->task_list);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&q->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup: it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same q->lock.
		 *
		 * But we need to ensure that a set-condition + wakeup after
		 * that can't see us: it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->task_list);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->task_list)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(q, wq_entry);
			else
				__add_wait_queue(q, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
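
/*
 * prepare_to_wait_event() is the workhorse behind the wait_event*()
 * macros; abridged, hedged sketch of the ___wait_event() loop in
 * <linux/wait.h> (error handling trimmed):
 *
 *	init_wait_entry(&wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		long ret = prepare_to_wait_event(&wq, &wait, state);
 *		if (condition)
 *			break;
 *		if (ret)	(interruptible case: bail out, -ERESTARTSYS)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */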

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->task_list)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->task_list)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
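
/*
 * Hedged usage sketch for the two helpers above (the caller owns
 * wq->lock around the loop; 'wq', 'condition' and 'rc' are
 * illustrative). On -ERESTARTSYS the entry may still be queued and
 * the task state is TASK_INTERRUPTIBLE, so the caller cleans both up:
 *
 *	DEFINE_WAIT(wait);
 *	int rc = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		rc = do_wait_intr(&wq, &wait);
 *		if (rc)
 *			break;
 *	}
 *	list_del_init(&wait.task_list);
 *	spin_unlock(&wq.lock);
 *	__set_current_state(TASK_RUNNING);
 */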

/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (i.e. we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wq_entry->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
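
/*
 * For reference, autoremove_wake_function() is the wake function that
 * DEFINE_WAIT() and init_wait_entry() install; abridged sketch of the
 * macro from <linux/wait.h>:
 *
 *	#define DEFINE_WAIT(name)					\
 *		struct wait_queue_entry name = {			\
 *			.private	= current,			\
 *			.func		= autoremove_wake_function,	\
 *			.task_list	= LIST_HEAD_INIT((name).task_list), \
 *		}
 */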

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;                          condition = true;
 *     smp_mb(); // A                            smp_wmb(); // C
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))   wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()                            try_to_wake_up();
 *     p->state = TASK_RUNNING;                  ~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;        condition = true;
 *     smp_mb(); // B                            smp_wmb(); // C
 *                                               wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq, &wait);
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under the waitqueue lock, LOCK
	 * doesn't imply a write barrier and users expect write barrier
	 * semantics from wakeup functions. The following smp_wmb() is
	 * equivalent to the smp_wmb() in try_to_wake_up() and is paired
	 * with the smp_store_mb() in wait_woken().
	 */
	smp_wmb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);

int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wq_entry, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the 'action' functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero codes; a nonzero return
 * halts waiting and is propagated to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			wait_bit_action_f *action, unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(&q->key, mode);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
				    wait_bit_action_f *action, unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
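
/*
 * Callers normally reach this via the wait_on_bit*() wrappers in
 * <linux/wait.h>, which test the bit first and only take the slow
 * path when it is set; roughly (sketch):
 *
 *	wait_on_bit(word, bit, mode)
 *		-> out_of_line_wait_on_bit(word, bit, bit_wait, mode)
 */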

int __sched out_of_line_wait_on_bit_timeout(
	void *word, int bit, wait_bit_action_f *action,
	unsigned mode, unsigned long timeout)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	wait.key.timeout = jiffies + timeout;
	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);

int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			wait_bit_action_f *action, unsigned mode)
{
	int ret = 0;

	for (;;) {
		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			ret = action(&q->key, mode);
			/*
			 * See the comment in prepare_to_wait_event().
			 * finish_wait() does not necessarily take wq->lock,
			 * but test_and_set_bit() implies an mb() which pairs
			 * with the smp_mb__after_atomic() before
			 * wake_up_page().
			 */
			if (ret)
				finish_wait(wq, &q->wait);
		}
		if (!test_and_set_bit(q->key.bit_nr, q->key.flags)) {
			if (!ret)
				finish_wait(wq, &q->wait);
			return 0;
		} else if (ret) {
			return ret;
		}
	}
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
					 wait_bit_action_f *action, unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_atomic(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
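
/*
 * Hedged example of the waker-side pairing described above
 * (MY_FLAG_BIT and 'word' are illustrative):
 *
 *	clear_bit(MY_FLAG_BIT, &word);
 *	smp_mb__after_atomic();		(order the clear vs. the queue check)
 *	wake_up_bit(&word, MY_FLAG_BIT);
 *
 * with the waiter sleeping in wait_on_bit(&word, MY_FLAG_BIT, mode).
 */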

/*
 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
 * index (we're keying off bit -1, but that would produce a horrible hash
 * value).
 */
static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{
	if (BITS_PER_LONG == 64) {
		unsigned long q = (unsigned long)p;

		return bit_waitqueue((void *)(q & ~1), q & 1);
	}
	return bit_waitqueue(p, 0);
}

static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
				  void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wq_entry, struct wait_bit_queue, wait);
	atomic_t *val = key->flags;

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			atomic_read(val) != 0)
		return 0;
	return autoremove_wake_function(wq_entry, mode, sync, key);
}

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the 'action' function passed to __wait_on_atomic_t() may
 * return nonzero codes; a nonzero return halts waiting and is
 * propagated to the caller.
 */
static __sched
int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
		       int (*action)(atomic_t *), unsigned mode)
{
	atomic_t *val;
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		val = q->key.flags;
		if (atomic_read(val) == 0)
			break;
		ret = (*action)(val);
	} while (!ret && atomic_read(val) != 0);
	finish_wait(wq, &q->wait);
	return ret;
}

#define DEFINE_WAIT_ATOMIC_T(name, p)					\
	struct wait_bit_queue name = {					\
		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
		.wait = {						\
			.private	= current,			\
			.func		= wake_atomic_t_function,	\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
					 unsigned mode)
{
	wait_queue_head_t *wq = atomic_t_waitqueue(p);
	DEFINE_WAIT_ATOMIC_T(wait, p);

	return __wait_on_atomic_t(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);

/**
 * wake_up_atomic_t - wake up a waiter on an atomic_t
 * @p: the atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);
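
/*
 * Hedged sketch of the usual pairing ('obj->refs' is illustrative):
 * the waker does
 *
 *	if (atomic_dec_and_test(&obj->refs))
 *		wake_up_atomic_t(&obj->refs);
 *
 * while the waiter sleeps in wait_on_atomic_t(&obj->refs, action, mode)
 * until the counter reaches zero.
 */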

__sched int bit_wait(struct wait_bit_key *word, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL(bit_wait);

__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
	io_schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL(bit_wait_io);

__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
	unsigned long now = READ_ONCE(jiffies);

	if (time_after_eq(now, word->timeout))
		return -EAGAIN;
	schedule_timeout(word->timeout - now);
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);

__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
	unsigned long now = READ_ONCE(jiffies);

	if (time_after_eq(now, word->timeout))
		return -EAGAIN;
	io_schedule_timeout(word->timeout - now);
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
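
/*
 * These bit_wait*() functions are the stock 'action' callbacks for the
 * wait-on-bit machinery above; e.g. (sketch) wait_on_bit_io() passes
 * bit_wait_io, and out_of_line_wait_on_bit_timeout() pairs with
 * bit_wait_timeout via the key's ->timeout field set by its caller.
 */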