// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

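/* Map a PM event code to a human-readable verb for log messages. */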
static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

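/*
 * initcall_debug_start()/initcall_debug_report() bracket a PM callback and,
 * when pm_print_times is enabled, log the callback address, the caller's PID
 * and the elapsed time.
 */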
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

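/* Wait for the suppliers of @dev to complete their PM transitions. */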
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (e.g. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	pr_err("Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

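/* Log how long a whole suspend/resume phase took, via pm_pr_dbg(). */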
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

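/*
 * Invoke a single PM callback for @dev with tracing and optional timing, and
 * report its result.  A NULL callback is treated as success.
 */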
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * suspend_event - Return a "suspend" message for given "resume" one.
 * @resume_msg: PM message representing a system-wide resume transition.
 */
static pm_message_t suspend_event(pm_message_t resume_msg)
{
	switch (resume_msg.event) {
	case PM_EVENT_RESUME:
		return PMSG_SUSPEND;
	case PM_EVENT_THAW:
	case PM_EVENT_RESTORE:
		return PMSG_FREEZE;
	case PM_EVENT_RECOVER:
		return PMSG_HIBERNATE;
	}
	return PMSG_ON;
}

/**
 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Checks whether or not the device may be left in suspend after a system-wide
 * transition to the working state.
 */
bool dev_pm_may_skip_resume(struct device *dev)
{
	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
}

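/*
 * Look up the "noirq" resume callback for @dev in subsystem precedence order:
 * PM domain, then device type, then class, then bus.  Returns NULL if no
 * subsystem provides one.
 */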
static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p);

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p);

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_may_skip_resume(dev);

	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		pm_message_t suspend_msg = suspend_event(state);

		/*
		 * If "freeze" callbacks have been skipped during a transition
		 * related to hibernation, the subsequent "thaw" callbacks must
		 * be skipped too or bad things may happen.  Otherwise, resume
		 * callbacks are going to be run for the device, so its runtime
		 * PM status must be changed to reflect the new state after the
		 * transition under way.
		 */
		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
			if (state.event == PM_EVENT_THAW) {
				skip_resume = true;
				goto Skip;
			} else {
				pm_runtime_set_active(dev);
			}
		}
	}

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

	if (skip_resume) {
		/* Make the next phases of resume skip the device. */
		dev->power.is_late_suspended = false;
		dev->power.is_suspended = false;
		/*
		 * The device is going to be left in suspend, but it might not
		 * have been in runtime suspend before the system suspended, so
		 * its runtime PM status needs to be updated to avoid confusing
		 * the runtime PM framework when runtime PM is enabled for the
		 * device again.
		 */
		pm_runtime_set_suspended(dev);
	}

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

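/*
 * Schedule @func to run asynchronously for @dev if async suspend/resume is
 * enabled for the device; return false so that the caller handles the device
 * synchronously otherwise.
 */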
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule_dev(func, dev);
		return true;
	}

	return false;
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

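/* Run the "noirq" resume phase for every device in dpm_noirq_list. */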
static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the asynchronous resume threads upfront, so that they are
	 * not delayed by the devices that resume synchronously.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();

	cpuidle_resume();
}

static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	callback = dpm_subsys_resume_early_cb(dev, state, &info);

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the asynchronous resume threads upfront, so that they are
	 * not delayed by the devices that resume synchronously.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

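/*
 * Mark the parent and all suppliers of @dev as needing a full resume, so they
 * are not left in suspend when @dev itself must be resumed.
 */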
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

static bool device_must_resume(struct device *dev, pm_message_t state,
			       bool no_subsys_suspend_noirq)
{
	pm_message_t resume_msg = resume_event(state);

	/*
	 * If all of the device driver's "noirq", "late" and "early" callbacks
	 * are invoked directly by the core, the decision to allow the device to
	 * stay in suspend can be based on its current runtime PM status and its
	 * wakeup settings.
	 */
	if (no_subsys_suspend_noirq &&
	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
		return !pm_runtime_status_suspended(dev) &&
			(resume_msg.event != PM_EVENT_RESUME ||
			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));

	/*
	 * The only safe strategy here is to require that if the device may not
	 * be left in suspend, resume callbacks must be invoked for it.
	 */
	return !dev->power.may_skip_resume;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool no_subsys_cb = false;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);

	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
		dev->power.must_resume = dev->power.must_resume ||
				atomic_read(&dev->power.usage_count) > 1 ||
				device_must_resume(dev, state, no_subsys_cb);
	} else {
		dev->power.must_resume = true;
	}

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}

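/* Run the "noirq" suspend phase for every device in dpm_late_early_list. */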
static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	cpuidle_pause();

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

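/*
 * If @dev lies in a wakeup path, propagate that fact to its parent, unless
 * the parent ignores its children for PM purposes.
 */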
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (dev->power.wakeup_path && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (dev_pm_smart_suspend_and_suspended(dev) &&
	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

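/*
 * Clear the direct_complete flag of the parent and all suppliers of @dev, so
 * that none of them is skipped once @dev itself needs full suspend handling.
 */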
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || dev->power.wakeup_path)
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = false;
	dev->power.must_resume = false;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before
	 * disabling probing of devices.  This sync point is important at
	 * least at boot time and when restoring from hibernation.
	 */
	wait_for_device_probe();
	/*
	 * Probing devices during suspend or hibernation is unsafe and would
	 * make system behavior unpredictable, so prohibit device probing here
	 * and defer the probes instead.  The normal behavior will be restored
	 * in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			pr_info("Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

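/* Return true if @ops defines no system sleep callbacks at all. */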
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

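/*
 * Cache whether @dev has any PM callbacks at any level (bus, class, type,
 * PM domain or driver), so the suspend/resume paths can skip it cheaply.
 */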
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}

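/*
 * Return true if the driver of @dev has opted in to DPM_FLAG_SMART_SUSPEND
 * and the device is already runtime-suspended.
 */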
bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}