// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);
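
/*
 * Like list_for_each_entry_rcu(), but with device_links_read_lock_held()
 * supplied as the lockdep condition: every walk using this macro must run
 * inside a device_links_read_lock() section.
 */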
#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
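
/*
 * First error reported during the current suspend phase; once set, the
 * remaining devices in that phase are short-circuited.
 */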
static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
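
/*
 * With the pm_print_times knob enabled, these two helpers bracket every PM
 * callback, logging the callback address, the calling task and how long the
 * callback took.
 */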
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
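
/*
 * Two graphs constrain the ordering here: the parent/child hierarchy and
 * device links.  A device waits for its children and link consumers before
 * it is suspended, and for its parent and link suppliers before it is
 * resumed.
 */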
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
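
/*
 * Illustrative example (not part of this file): for a driver declaring
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend and
 * pm_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume; the foo_* names are
 * hypothetical.
 */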

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback runs.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned callback
 * runs.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	pr_err("Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The watchdog timer that expired.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * suspend_event - Return a "suspend" message for given "resume" one.
 * @resume_msg: PM message representing a system-wide resume transition.
 */
static pm_message_t suspend_event(pm_message_t resume_msg)
{
	switch (resume_msg.event) {
	case PM_EVENT_RESUME:
		return PMSG_SUSPEND;
	case PM_EVENT_THAW:
	case PM_EVENT_RESTORE:
		return PMSG_FREEZE;
	case PM_EVENT_RECOVER:
		return PMSG_HIBERNATE;
	}
	return PMSG_ON;
}
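
/*
 * For example, while thawing after the hibernation image has been written
 * (PM_EVENT_THAW), suspend_event() yields PMSG_FREEZE, the message used by
 * the preceding suspend phase, so the resume path can tell which suspend
 * callbacks would have run.
 */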

/**
 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Checks whether or not the device may be left in suspend after a system-wide
 * transition to the working state.
 */
bool dev_pm_may_skip_resume(struct device *dev)
{
	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
}
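
/*
 * Subsystem callbacks are looked up in a fixed priority order: PM domain,
 * then device type, then class, then bus.  The driver's own callback is
 * used only when no subsystem supplies one.
 */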
static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p);

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p);

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_may_skip_resume(dev);

	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		pm_message_t suspend_msg = suspend_event(state);

		/*
		 * If "freeze" callbacks have been skipped during a transition
		 * related to hibernation, the subsequent "thaw" callbacks must
		 * be skipped too or bad things may happen.  Otherwise, resume
		 * callbacks are going to be run for the device, so its runtime
		 * PM status must be changed to reflect the new state after the
		 * transition under way.
		 */
		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
			if (state.event == PM_EVENT_THAW) {
				skip_resume = true;
				goto Skip;
			} else {
				pm_runtime_set_active(dev);
			}
		}
	}

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

	if (skip_resume) {
		/* Make the next phases of resume skip the device. */
		dev->power.is_late_suspended = false;
		dev->power.is_suspended = false;
		/*
		 * The device is going to be left in suspend, but it might not
		 * have been in runtime suspend before the system suspended, so
		 * its runtime PM status needs to be updated to avoid confusing
		 * the runtime PM framework when runtime PM is enabled for the
		 * device again.
		 */
		pm_runtime_set_suspended(dev);
	}

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
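
/*
 * Schedule @func for asynchronous execution if async handling is enabled for
 * @dev.  Returns true if the work was queued, in which case the caller must
 * skip the synchronous path; the device reference taken here is dropped by
 * @func itself.
 */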
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule_dev(func, dev);
		return true;
	}

	return false;
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case their start is delayed
	 * by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();

	cpuidle_resume();
}

static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	callback = dpm_subsys_resume_early_cb(dev, state, &info);

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case their start is delayed
	 * by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

static bool device_must_resume(struct device *dev, pm_message_t state,
			       bool no_subsys_suspend_noirq)
{
	pm_message_t resume_msg = resume_event(state);

	/*
	 * If all of the device driver's "noirq", "late" and "early" callbacks
	 * are invoked directly by the core, the decision to allow the device to
	 * stay in suspend can be based on its current runtime PM status and its
	 * wakeup settings.
	 */
	if (no_subsys_suspend_noirq &&
	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
		return !pm_runtime_status_suspended(dev) &&
			(resume_msg.event != PM_EVENT_RESUME ||
			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));

	/*
	 * The only safe strategy here is to require that if the device may not
	 * be left in suspend, resume callbacks must be invoked for it.
	 */
	return !dev->power.may_skip_resume;
}
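
/*
 * Illustrative only: a driver opting in to the optimizations checked above
 * would set the relevant flags once at probe time, e.g.
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_LEAVE_SUSPENDED);
 */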

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool no_subsys_cb = false;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);

	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
		dev->power.must_resume = dev->power.must_resume ||
					atomic_read(&dev->power.usage_count) > 1 ||
					device_must_resume(dev, state, no_subsys_cb);
	} else {
		dev->power.must_resume = true;
	}

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	cpuidle_pause();

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (dev->power.wakeup_path && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (dev_pm_smart_suspend_and_suspended(dev) &&
	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}
1588
cf579dfb
RW
1589/**
1590 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1591 * @state: PM transition of the system being carried out.
1592 */
2a8a8ce6 1593int dpm_suspend_late(pm_message_t state)
cf579dfb
RW
1594{
1595 ktime_t starttime = ktime_get();
1596 int error = 0;
1597
bb3632c6 1598 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
cf579dfb 1599 mutex_lock(&dpm_list_mtx);
de377b39
LC
1600 pm_transition = state;
1601 async_error = 0;
1602
cf579dfb
RW
1603 while (!list_empty(&dpm_suspended_list)) {
1604 struct device *dev = to_device(dpm_suspended_list.prev);
1605
1606 get_device(dev);
1607 mutex_unlock(&dpm_list_mtx);
1608
de377b39 1609 error = device_suspend_late(dev);
cf579dfb
RW
1610
1611 mutex_lock(&dpm_list_mtx);
3a17fb32
RW
1612 if (!list_empty(&dev->power.entry))
1613 list_move(&dev->power.entry, &dpm_late_early_list);
1614
cf579dfb
RW
1615 if (error) {
1616 pm_dev_err(dev, state, " late", error);
cf579dfb
RW
1617 dpm_save_failed_dev(dev_name(dev));
1618 put_device(dev);
1619 break;
1620 }
cf579dfb 1621 put_device(dev);
52d136cc 1622
de377b39 1623 if (async_error)
52d136cc 1624 break;
cf579dfb
RW
1625 }
1626 mutex_unlock(&dpm_list_mtx);
de377b39 1627 async_synchronize_full();
246ef766
ID
1628 if (!error)
1629 error = async_error;
de377b39
LC
1630 if (error) {
1631 suspend_stats.failed_suspend_late++;
1632 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
cf579dfb 1633 dpm_resume_early(resume_event(state));
de377b39 1634 }
48059c09 1635 dpm_show_time(starttime, state, error, "late");
bb3632c6 1636 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
775b64d2
RW
1637 return error;
1638}
cf579dfb
RW
1639
1640/**
1641 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1642 * @state: PM transition of the system being carried out.
1643 */
1644int dpm_suspend_end(pm_message_t state)
1645{
3540d38d
BVA
1646 ktime_t starttime = ktime_get();
1647 int error;
1648
1649 error = dpm_suspend_late(state);
064b021f 1650 if (error)
3540d38d 1651 goto out;
064b021f
CC
1652
1653 error = dpm_suspend_noirq(state);
3540d38d 1654 if (error)
997a0311 1655 dpm_resume_early(resume_event(state));
cf579dfb 1656
3540d38d
BVA
1657out:
1658 dpm_show_time(starttime, state, error, "end");
1659 return error;
cf579dfb
RW
1660}
1661EXPORT_SYMBOL_GPL(dpm_suspend_end);
775b64d2 1662

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: String describing the callback, used in diagnostics.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
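
/*
 * Note: power.direct_complete is set during the preceding "prepare" phase
 * (in device_prepare(), outside this excerpt) when a runtime-suspended
 * device may be left alone for the whole transition.  __device_suspend()
 * below clears it again whenever wakeup handling or an earlier error makes
 * the shortcut unsafe, and clears it on the parent and suppliers once the
 * device has actually been suspended.
 */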
1710
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_subordinate(dev, async);

        if (async_error) {
                dev->power.direct_complete = false;
                goto Complete;
        }

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                dev->power.direct_complete = false;
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        /* Avoid direct_complete to let wakeup_path propagate. */
        if (device_may_wakeup(dev) || dev->power.wakeup_path)
                dev->power.direct_complete = false;

        if (dev->power.direct_complete) {
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
                        if (pm_runtime_status_suspended(dev)) {
                                pm_dev_dbg(dev, state, "direct-complete ");
                                goto Complete;
                        }

                        pm_runtime_enable(dev);
                }
                dev->power.direct_complete = false;
        }

        dev->power.may_skip_resume = false;
        dev->power.must_resume = false;

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class && dev->class->pm) {
                info = "class ";
                callback = pm_op(dev->class->pm, state);
                goto Run;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                               "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (device_may_wakeup(dev))
                        dev->power.wakeup_path = true;

                dpm_propagate_wakeup_to_parent(dev);
                dpm_clear_superiors_direct_complete(dev);
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        if (error)
                async_error = error;

        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}

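/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * dev_pm_ops that the lookup order above falls back to when no PM domain,
 * type, class or bus callback claims the device.  SET_SYSTEM_SLEEP_PM_OPS
 * wires ->suspend/->resume to the hypothetical handlers below.
 */
#if 0
static int example_dev_suspend(struct device *dev)
{
        return 0;       /* quiesce the hardware */
}

static int example_dev_resume(struct device *dev)
{
        return 0;       /* re-initialize the hardware */
}

static const struct dev_pm_ops example_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(example_dev_suspend, example_dev_resume)
};
#endif
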
static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        if (dpm_async_fn(dev, async_suspend))
                return 0;

        return __device_suspend(dev, pm_transition, false);
}

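/*
 * Illustrative sketch (not part of this file): a driver opts a device into
 * the asynchronous path taken by dpm_async_fn() above by calling
 * device_enable_async_suspend() during probe.  example_async_probe() is a
 * hypothetical placeholder.
 */
#if 0
static int example_async_probe(struct device *dev)
{
        /* Allow this device to suspend/resume in parallel with others. */
        device_enable_async_suspend(dev);
        return 0;
}
#endif
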
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
        might_sleep();

        devfreq_suspend();
        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        }
        dpm_show_time(starttime, state, error, NULL);
        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        int ret = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device. To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = false;

        if (dev->power.no_pm_callbacks)
                goto unlock;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.prepare;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->prepare;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->prepare;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->prepare;

        if (!callback && dev->driver && dev->driver->pm)
                callback = dev->driver->pm->prepare;

        if (callback)
                ret = callback(dev);

unlock:
        device_unlock(dev);

        if (ret < 0) {
                suspend_report_result(callback, ret);
                pm_runtime_put(dev);
                return ret;
        }
        /*
         * A positive return value from ->prepare() means "this device appears
         * to be runtime-suspended and its state is fine, so if it really is
         * runtime-suspended, you can leave it in that state provided that you
         * will do the same thing with all of its descendants". This only
         * applies to suspend transitions, however.
         */
        spin_lock_irq(&dev->power.lock);
        dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
                (ret > 0 || dev->power.no_pm_callbacks) &&
                !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
        spin_unlock_irq(&dev->power.lock);
        return 0;
}

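/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * ->prepare() callback opting into the direct_complete path evaluated
 * above.  A positive return says "if this device is in fact
 * runtime-suspended, it may stay that way across the transition".
 */
#if 0
static int example_prepare(struct device *dev)
{
        /* Nothing to do before suspend; the runtime-suspended state is fine. */
        return 1;
}
#endif
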
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
        might_sleep();

        /*
         * Give the known devices a chance to complete their probes before we
         * disable probing. This sync point is important at least at boot time
         * and for hibernation restore.
         */
        wait_for_device_probe();
        /*
         * It is unsafe for device probing to happen during suspend or
         * hibernation, as system behavior would be unpredictable in that
         * case. So prohibit device probing here and defer those probes
         * instead. The normal behavior will be restored in dpm_complete().
         */
        device_block_probing();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                error = device_prepare(dev, state);
                trace_device_pm_callback_end(dev, error);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        pr_info("Device %s not prepared for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else {
                error = dpm_suspend(state);
        }
        dpm_show_time(starttime, state, error, "start");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

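/*
 * Illustrative sketch (hypothetical callback, not part of this file):
 * callers normally reach __suspend_report_result() through the
 * suspend_report_result() macro, which supplies the enclosing function's
 * name, as the suspend_report_result(cb, error) call in legacy_suspend()
 * above does.  example_quiesce() is a hypothetical helper.
 */
#if 0
static int example_bus_suspend(struct device *dev, pm_message_t state)
{
        int error = example_quiesce(dev);

        suspend_report_result(example_quiesce, error);
        return error;
}
#endif
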
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

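/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * ->suspend() callback using device_pm_wait_for_dev() so that it does not
 * run before another device it depends on has finished suspending.  The
 * driver-data layout is hypothetical.
 */
#if 0
struct example_data {
        struct device *other_dev;
};

static int example_suspend_ordered(struct device *dev)
{
        struct example_data *data = dev_get_drvdata(dev);

        /* Returns async_error, i.e. nonzero if the transition has failed. */
        return device_pm_wait_for_dev(dev, data->other_dev);
}
#endif
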
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

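/*
 * Illustrative sketch (not part of this file): counting the devices on
 * dpm_list with dpm_for_each_dev().  The callback and counter are
 * hypothetical.
 */
#if 0
static void example_count_dev(struct device *dev, void *data)
{
        unsigned int *count = data;

        (*count)++;
}

static unsigned int example_count_devices(void)
{
        unsigned int count = 0;

        dpm_for_each_dev(&count, example_count_dev);
        return count;
}
#endif
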
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
        if (!ops)
                return true;

        return !ops->prepare &&
               !ops->suspend &&
               !ops->suspend_late &&
               !ops->suspend_noirq &&
               !ops->resume_noirq &&
               !ops->resume_early &&
               !ops->resume &&
               !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_pm_callbacks =
                (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
                 !dev->bus->suspend && !dev->bus->resume)) &&
                (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
                (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
                (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
                (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
                 !dev->driver->suspend && !dev->driver->resume));
        spin_unlock_irq(&dev->power.lock);
}

bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
        return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
               pm_runtime_status_suspended(dev);
}
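
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * driver sets DPM_FLAG_SMART_SUSPEND at probe time so that the check above
 * can report the device as both "smart suspend" capable and
 * runtime-suspended, allowing its system suspend callbacks to be skipped.
 */
#if 0
static int example_smart_probe(struct device *dev)
{
        dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
        return 0;
}
#endif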