// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
        list_for_each_entry_rcu(pos, head, member, \
                        device_links_read_lock_held())
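/*
 * Illustrative sketch (not part of this file): every system sleep callback
 * in a driver's dev_pm_ops has the pm_callback_t signature above, which is
 * what lets the core dispatch them uniformly.  The foo_* names here are
 * hypothetical:
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              return 0;       // quiesce the hypothetical device
 *      }
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              return 0;       // bring it back up
 *      }
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              .suspend = foo_suspend,
 *              .resume = foo_resume,
 *      };
 */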
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
static const char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        /* Skip PM setup/initialization. */
        if (device_pm_not_required(dev))
                return;

        pr_debug("Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        device_pm_check_callbacks(dev);
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                         dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev->power.in_dpm_list = true;
        mutex_unlock(&dpm_list_mtx);
}
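/*
 * Illustrative sketch (not part of this file): a driver registering a
 * purely virtual device can have it skipped by the checks above by marking
 * it before registration; the surrounding probe context is hypothetical:
 *
 *      device_initialize(&foo->dev);
 *      device_set_pm_not_required(&foo->dev);
 *      err = device_add(&foo->dev);
 */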
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        if (device_pm_not_required(dev))
                return;

        pr_debug("Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        dev->power.in_dpm_list = false;
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
        device_pm_check_callbacks(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
        if (!pm_print_times_enabled)
                return 0;

        dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
                 task_pid_nr(current),
                 dev->parent ? dev_name(dev->parent) : "none");
        return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  void *cb, int error)
{
        ktime_t rettime;
        s64 nsecs;

        if (!pm_print_times_enabled)
                return;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

        dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
                 (unsigned long long)nsecs >> 10);
}
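/*
 * Usage note (not part of this file): the two helpers above only print
 * their dev_info() lines when the global pm_print_times knob is set, e.g.
 * from user space before a suspend/resume cycle:
 *
 *      echo 1 > /sys/power/pm_print_times
 */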
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        /*
         * If the supplier goes away right after we've checked the link to it,
         * we'll wait for its completion to change the state, but that's fine,
         * because the only things that will block as a result are the SRCU
         * callbacks freeing the link objects for the links in the list we're
         * walking.
         */
        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->supplier, async);

        device_links_read_unlock(idx);
}
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
        struct device *parent;

        /*
         * If the device is resumed asynchronously and the parent's callback
         * deletes both the device and the parent itself, the parent object may
         * be freed while this function is running, so avoid that by reference
         * counting the parent once more unless the device has been deleted
         * already (in which case return right away).
         */
        mutex_lock(&dpm_list_mtx);

        if (!device_pm_initialized(dev)) {
                mutex_unlock(&dpm_list_mtx);
                return false;
        }

        parent = get_device(dev->parent);

        mutex_unlock(&dpm_list_mtx);

        dpm_wait(parent, async);
        put_device(parent);

        dpm_wait_for_suppliers(dev, async);

        /*
         * If the parent's callback has deleted the device, attempting to resume
         * it would be invalid, so avoid doing that then.
         */
        return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        /*
         * The status of a device link can only be changed from "dormant" by a
         * probe, but that cannot happen during system suspend/resume.  In
         * theory it can change to "dormant" at that time, but then it is
         * reasonable to wait for the target device anyway (e.g. if it goes
         * away, it's better to wait for it to go away completely and then
         * continue instead of trying to continue in parallel with its
         * unregistration).
         */
        list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->consumer, async);

        device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
        dpm_wait_for_children(dev, async);
        dpm_wait_for_consumers(dev, async);
}
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
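/*
 * Illustrative sketch (not part of this file): pm_op() simply picks a field
 * out of a dev_pm_ops.  Most drivers fill those fields with the
 * SET_SYSTEM_SLEEP_PM_OPS() helper, which reuses one suspend/resume pair
 * for the suspend, freeze, poweroff and thaw/restore events; foo_suspend
 * and foo_resume are the hypothetical callbacks sketched earlier:
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *      };
 */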
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
                       int error)
{
        pr_err("Device %s failed to %s%s: error %d\n",
               dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
                          const char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;

        pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
                  info ?: "", info ? " " : "", pm_verb(state.event),
                  error ? "aborted" : "complete",
                  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, const char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev, cb);

        pm_dev_dbg(dev, state, info);
        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, cb, error);

        return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
        struct dpm_watchdog *wd = from_timer(wd, t, timer);

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);
        panic("%s %s: unrecoverable failure\n",
                dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
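/*
 * Configuration note (not part of this file): the watchdog above is only
 * compiled in with CONFIG_DPM_WATCHDOG enabled, and the per-callback time
 * budget comes from its companion Kconfig option, e.g. in a .config:
 *
 *      CONFIG_DPM_WATCHDOG=y
 *      CONFIG_DPM_WATCHDOG_TIMEOUT=120
 */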
/*------------------------- Resume routines -------------------------*/

/**
 * suspend_event - Return a "suspend" message for given "resume" one.
 * @resume_msg: PM message representing a system-wide resume transition.
 */
static pm_message_t suspend_event(pm_message_t resume_msg)
{
        switch (resume_msg.event) {
        case PM_EVENT_RESUME:
                return PMSG_SUSPEND;
        case PM_EVENT_THAW:
        case PM_EVENT_RESTORE:
                return PMSG_FREEZE;
        case PM_EVENT_RECOVER:
                return PMSG_HIBERNATE;
        }
        return PMSG_ON;
}
/**
 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Checks whether or not the device may be left in suspend after a system-wide
 * transition to the working state.
 */
bool dev_pm_may_skip_resume(struct device *dev)
{
        return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
}
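/*
 * Illustrative sketch (not part of this file): a driver opts into being
 * left in suspend across system resume by setting DPM_FLAG_LEAVE_SUSPENDED
 * (typically together with DPM_FLAG_SMART_SUSPEND) at probe time:
 *
 *      dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *                                   DPM_FLAG_LEAVE_SUSPENDED);
 *
 * The core then uses dev_pm_may_skip_resume() and power.must_resume to
 * decide whether the device's resume callbacks can be skipped.
 */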
static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
                                                pm_message_t state,
                                                const char **info_p)
{
        pm_callback_t callback;
        const char *info;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        } else {
                return NULL;
        }

        if (info_p)
                *info_p = info;

        return callback;
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
                                                 pm_message_t state,
                                                 const char **info_p);

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
                                                pm_message_t state,
                                                const char **info_p);
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback;
        const char *info;
        bool skip_resume;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_noirq_suspended)
                goto Out;

        if (!dpm_wait_for_superior(dev, async))
                goto Out;

        skip_resume = dev_pm_may_skip_resume(dev);

        callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
        if (callback)
                goto Run;

        if (skip_resume)
                goto Skip;

        if (dev_pm_smart_suspend_and_suspended(dev)) {
                pm_message_t suspend_msg = suspend_event(state);

                /*
                 * If "freeze" callbacks have been skipped during a transition
                 * related to hibernation, the subsequent "thaw" callbacks must
                 * be skipped too or bad things may happen.  Otherwise, resume
                 * callbacks are going to be run for the device, so its runtime
                 * PM status must be changed to reflect the new state after the
                 * transition under way.
                 */
                if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
                    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
                        if (state.event == PM_EVENT_THAW) {
                                skip_resume = true;
                                goto Skip;
                        } else {
                                pm_runtime_set_active(dev);
                        }
                }
        }

        if (dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);

Skip:
        dev->power.is_noirq_suspended = false;

        if (skip_resume) {
                /* Make the next phases of resume skip the device. */
                dev->power.is_late_suspended = false;
                dev->power.is_suspended = false;
                /*
                 * The device is going to be left in suspend, but it might not
                 * have been in runtime suspend before the system suspended, so
                 * its runtime PM status needs to be updated to avoid confusing
                 * the runtime PM framework when runtime PM is enabled for the
                 * device again.
                 */
                pm_runtime_set_suspended(dev);
        }

Out:
        complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
}
static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule_dev(func, dev);
                return true;
        }

        return false;
}
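/*
 * Usage note (not part of this file): a driver that can tolerate having
 * its callbacks run concurrently with other devices' opts in with
 *
 *      device_enable_async_suspend(dev);
 *
 * and the mechanism as a whole can be toggled from user space through
 * /sys/power/pm_async (the pm_async_enabled flag tested above).
 */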
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_noirq(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Advance the async threads upfront,
         * in case the starting of async threads is
         * delayed by non-async resuming devices.
         */
        list_for_each_entry(dev, &dpm_noirq_list, power.entry)
                dpm_async_fn(dev, async_resume_noirq);

        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_noirq(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_noirq++;
                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " noirq", error);
                        }
                }

                put_device(dev);

                mutex_lock(&dpm_list_mtx);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, 0, "noirq");
        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
        dpm_noirq_resume_devices(state);

        resume_device_irqs();
        device_wakeup_disarm_wake_irqs();

        cpuidle_resume();
}
static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
                                                pm_message_t state,
                                                const char **info_p)
{
        pm_callback_t callback;
        const char *info;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        } else {
                return NULL;
        }

        if (info_p)
                *info_p = info;

        return callback;
}
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback;
        const char *info;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_late_suspended)
                goto Out;

        if (!dpm_wait_for_superior(dev, async))
                goto Out;

        callback = dpm_subsys_resume_early_cb(dev, state, &info);

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_late_suspended = false;

Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        complete_all(&dev->power.completion);
        return error;
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_early(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Advance the async threads upfront,
         * in case the starting of async threads is
         * delayed by non-async resuming devices.
         */
        list_for_each_entry(dev, &dpm_late_early_list, power.entry)
                dpm_async_fn(dev, async_resume_early);

        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_early(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_early++;
                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " early", error);
                        }
                }

                put_device(dev);

                mutex_lock(&dpm_list_mtx);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, 0, "early");
        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
                /* Match the pm_runtime_disable() in __device_suspend(). */
                pm_runtime_enable(dev);
                goto Complete;
        }

        if (!dpm_wait_for_superior(dev, async))
                goto Complete;

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class && dev->class->pm) {
                info = "class ";
                callback = pm_op(dev->class->pm, state);
                goto Driver;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry)
                dpm_async_fn(dev, async_resume);

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, 0, NULL);

        cpufreq_resume();
        devfreq_resume();
        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        const char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                device_complete(dev, state);
                trace_device_pm_callback_end(dev, 0);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);

        /* Allow device probing and trigger re-probing of deferred devices */
        device_unblock_probing();
        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}
static void dpm_superior_set_must_resume(struct device *dev)
{
        struct device_link *link;
        int idx;

        if (dev->parent)
                dev->parent->power.must_resume = true;

        idx = device_links_read_lock();

        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
                link->supplier->power.must_resume = true;

        device_links_read_unlock(idx);
}
static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
                                                 pm_message_t state,
                                                 const char **info_p)
{
        pm_callback_t callback;
        const char *info;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        } else {
                return NULL;
        }

        if (info_p)
                *info_p = info;

        return callback;
}
static bool device_must_resume(struct device *dev, pm_message_t state,
                               bool no_subsys_suspend_noirq)
{
        pm_message_t resume_msg = resume_event(state);

        /*
         * If all of the device driver's "noirq", "late" and "early" callbacks
         * are invoked directly by the core, the decision to allow the device to
         * stay in suspend can be based on its current runtime PM status and its
         * wakeup settings.
         */
        if (no_subsys_suspend_noirq &&
            !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
            !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
            !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
                return !pm_runtime_status_suspended(dev) &&
                        (resume_msg.event != PM_EVENT_RESUME ||
                         (device_can_wakeup(dev) && !device_may_wakeup(dev)));

        /*
         * The only safe strategy here is to require that if the device may not
         * be left in suspend, resume callbacks must be invoked for it.
         */
        return !dev->power.may_skip_resume;
}
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback;
        const char *info;
        bool no_subsys_cb = false;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_subordinate(dev, async);

        if (async_error)
                goto Complete;

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
        if (callback)
                goto Run;

        no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);

        if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);
        if (error) {
                async_error = error;
                goto Complete;
        }

Skip:
        dev->power.is_noirq_suspended = true;

        if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
                dev->power.must_resume = dev->power.must_resume ||
                                atomic_read(&dev->power.usage_count) > 1 ||
                                device_must_resume(dev, state, no_subsys_cb);
        } else {
                dev->power.must_resume = true;
        }

        if (dev->power.must_resume)
                dpm_superior_set_must_resume(dev);

Complete:
        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_noirq(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
        if (dpm_async_fn(dev, async_suspend_noirq))
                return 0;

        return __device_suspend_noirq(dev, pm_transition, false);
}
static int dpm_noirq_suspend_devices(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;

        if (error) {
                suspend_stats.failed_suspend_noirq++;
                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
        }
        dpm_show_time(starttime, state, error, "noirq");
        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
        return error;
}
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        int ret;

        cpuidle_pause();

        device_wakeup_arm_wake_irqs();
        suspend_device_irqs();

        ret = dpm_noirq_suspend_devices(state);
        if (ret)
                dpm_resume_noirq(resume_event(state));

        return ret;
}
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
        struct device *parent = dev->parent;

        if (!parent)
                return;

        spin_lock_irq(&parent->power.lock);

        if (dev->power.wakeup_path && !parent->power.ignore_children)
                parent->power.wakeup_path = true;

        spin_unlock_irq(&parent->power.lock);
}
static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
                                                pm_message_t state,
                                                const char **info_p)
{
        pm_callback_t callback;
        const char *info;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        } else {
                return NULL;
        }

        if (info_p)
                *info_p = info;

        return callback;
}
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback;
        const char *info;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        __pm_runtime_disable(dev, false);

        dpm_wait_for_subordinate(dev, async);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        callback = dpm_subsys_suspend_late_cb(dev, state, &info);
        if (callback)
                goto Run;

        if (dev_pm_smart_suspend_and_suspended(dev) &&
            !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);
        if (error) {
                async_error = error;
                goto Complete;
        }
        dpm_propagate_wakeup_to_parent(dev);

Skip:
        dev->power.is_late_suspended = true;

Complete:
        TRACE_SUSPEND(error);
        complete_all(&dev->power.completion);
        return error;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_late(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }
        put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
        if (dpm_async_fn(dev, async_suspend_late))
                return 0;

        return __device_suspend_late(dev, pm_transition, false);
}
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev);

                mutex_lock(&dpm_list_mtx);
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);

                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend_late++;
                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                dpm_resume_early(resume_event(state));
        }
        dpm_show_time(starttime, state, error, "late");
        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
        return error;
}
/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error;

        error = dpm_suspend_late(state);
        if (error)
                goto out;

        error = dpm_suspend_noirq(state);
        if (error)
                dpm_resume_early(resume_event(state));

out:
        dpm_show_time(starttime, state, error, "end");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          const char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev, cb);

        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev, state);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, cb, error);

        return error;
}
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
        struct device_link *link;
        int idx;

        if (dev->parent) {
                spin_lock_irq(&dev->parent->power.lock);
                dev->parent->power.direct_complete = false;
                spin_unlock_irq(&dev->parent->power.lock);
        }

        idx = device_links_read_lock();

        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
                spin_lock_irq(&link->supplier->power.lock);
                link->supplier->power.direct_complete = false;
                spin_unlock_irq(&link->supplier->power.lock);
        }

        device_links_read_unlock(idx);
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_subordinate(dev, async);

        if (async_error) {
                dev->power.direct_complete = false;
                goto Complete;
        }

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                dev->power.direct_complete = false;
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        /* Avoid direct_complete to let wakeup_path propagate. */
        if (device_may_wakeup(dev) || dev->power.wakeup_path)
                dev->power.direct_complete = false;

        if (dev->power.direct_complete) {
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
                        if (pm_runtime_status_suspended(dev)) {
                                pm_dev_dbg(dev, state, "direct-complete ");
                                goto Complete;
                        }

                        pm_runtime_enable(dev);
                }
                dev->power.direct_complete = false;
        }

        dev->power.may_skip_resume = false;
        dev->power.must_resume = false;

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class && dev->class->pm) {
                info = "class ";
                callback = pm_op(dev->class->pm, state);
                goto Run;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                                "legacy bus ");
                        goto End;
                }
        }

Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

End:
        if (!error) {
                dev->power.is_suspended = true;
                if (device_may_wakeup(dev))
                        dev->power.wakeup_path = true;

                dpm_propagate_wakeup_to_parent(dev);
                dpm_clear_superiors_direct_complete(dev);
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

Complete:
        if (error)
                async_error = error;

        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        if (dpm_async_fn(dev, async_suspend))
                return 0;

        return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
        might_sleep();

        devfreq_suspend();
        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        }
        dpm_show_time(starttime, state, error, NULL);
        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
        return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        int ret = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = false;

        if (dev->power.no_pm_callbacks)
                goto unlock;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.prepare;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->prepare;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->prepare;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->prepare;

        if (!callback && dev->driver && dev->driver->pm)
                callback = dev->driver->pm->prepare;

        if (callback)
                ret = callback(dev);

unlock:
        device_unlock(dev);

        if (ret < 0) {
                suspend_report_result(callback, ret);
                pm_runtime_put(dev);
                return ret;
        }
        /*
         * A positive return value from ->prepare() means "this device appears
         * to be runtime-suspended and its state is fine, so if it really is
         * runtime-suspended, you can leave it in that state provided that you
         * will do the same thing with all of its descendants".  This only
         * applies to suspend transitions, however.
         */
        spin_lock_irq(&dev->power.lock);
        dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
                (ret > 0 || dev->power.no_pm_callbacks) &&
                !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
        spin_unlock_irq(&dev->power.lock);
        return 0;
}
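/*
 * Illustrative sketch (not part of this file): a driver that keeps its
 * device runtime-suspended and wants the "direct complete" optimization
 * described above simply returns a positive value from its ->prepare()
 * callback; the function name is hypothetical:
 *
 *      static int foo_prepare(struct device *dev)
 *      {
 *              return pm_runtime_suspended(dev) ? 1 : 0;
 *      }
 */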
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
        might_sleep();

        /*
         * Give a chance for the known devices to complete their probes, before
         * disabling probing of devices.  This sync point is important at least
         * at boot time + hibernation restore.
         */
        wait_for_device_probe();
        /*
         * It is unsafe if probing of devices will happen during suspend or
         * hibernation and system behavior will be unpredictable in this case.
         * So, let's prohibit device's probing here and defer their probes
         * instead.  The normal behavior will be restored in dpm_complete().
         */
        device_block_probing();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                error = device_prepare(dev, state);
                trace_device_pm_callback_end(dev, error);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        pr_info("Device %s not prepared for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
        return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        dpm_show_time(starttime, state, error, "start");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
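/*
 * Usage note (not part of this file): the system sleep cores drive the
 * exported entry points in matched pairs, roughly:
 *
 *      error = dpm_suspend_start(PMSG_SUSPEND);   // prepare + suspend
 *      ...                                        // platform-specific work
 *      error = dpm_suspend_end(PMSG_SUSPEND);     // late + noirq
 *      ...                                        // system sleeps here
 *      dpm_resume_start(PMSG_RESUME);             // noirq + early
 *      ...
 *      dpm_resume_end(PMSG_RESUME);               // resume + complete
 *
 * Hibernation uses the same pairs with PMSG_FREEZE/PMSG_THAW and friends.
 */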
void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
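/*
 * Usage note (not part of this file): callers normally go through the
 * suspend_report_result() wrapper, which supplies __func__ implicitly,
 * e.g. in a driver's suspend path (foo_quiesce_hw() is hypothetical):
 *
 *      error = foo_quiesce_hw(dev);
 *      suspend_report_result(foo_quiesce_hw, error);
 */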
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
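/*
 * Illustrative sketch (not part of this file): a driver with an ordering
 * dependency that the device hierarchy does not express can wait for the
 * other device from its own callback; foo, bar_dev and foo_reinit_hw()
 * are hypothetical:
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              struct foo *foo = dev_get_drvdata(dev);
 *              int error;
 *
 *              // make sure the other device finished resuming first
 *              error = device_pm_wait_for_dev(dev, foo->bar_dev);
 *              if (error)
 *                      return error;
 *
 *              return foo_reinit_hw(foo);
 *      }
 */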
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
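/*
 * Illustrative sketch (not part of this file): counting every device the
 * PM core tracks; the callback signature must match the iterator's, and
 * foo_count_one is hypothetical:
 *
 *      static void foo_count_one(struct device *dev, void *data)
 *      {
 *              unsigned int *count = data;
 *
 *              (*count)++;
 *              dev_dbg(dev, "tracked by the PM core\n");
 *      }
 *
 *      unsigned int count = 0;
 *      dpm_for_each_dev(&count, foo_count_one);
 */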
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
        if (!ops)
                return true;

        return !ops->prepare &&
               !ops->suspend &&
               !ops->suspend_late &&
               !ops->suspend_noirq &&
               !ops->resume_noirq &&
               !ops->resume_early &&
               !ops->resume &&
               !ops->complete;
}
void device_pm_check_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_pm_callbacks =
                (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
                 !dev->bus->suspend && !dev->bus->resume)) &&
                (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
                (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
                (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
                (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
                 !dev->driver->suspend && !dev->driver->resume));
        spin_unlock_irq(&dev->power.lock);
}
bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
        return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
                pm_runtime_status_suspended(dev);
}
);