1 /*
2 * drivers/base/power/main.c - Where the driver meets power management.
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
7 * This file is released under the GPLv2
10 * The driver model core calls device_pm_add() when a device is registered.
11 * This will initialize the embedded device_pm_info object in the device
12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added.
15 * A separate list is used for keeping track of power info, because the power
16 * domain dependencies may differ from the ancestral dependencies that the
17 * subsystem list maintains.
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
37 #include "../base.h"
38 #include "power.h"
40 typedef int (*pm_callback_t)(struct device *);
43 * The entries in the dpm_list list are in a depth first order, simply
44 * because children are guaranteed to be discovered after parents, and
45 * are inserted at the back of the list on discovery.
47 * Since device_pm_add() may be called with a device lock held,
48 * we must never try to acquire a device lock while holding
49 * dpm_list_mutex.
52 LIST_HEAD(dpm_list);
53 static LIST_HEAD(dpm_prepared_list);
54 static LIST_HEAD(dpm_suspended_list);
55 static LIST_HEAD(dpm_late_early_list);
56 static LIST_HEAD(dpm_noirq_list);
58 struct suspend_stats suspend_stats;
59 static DEFINE_MUTEX(dpm_list_mtx);
60 static pm_message_t pm_transition;
62 static int async_error;
64 static const char *pm_verb(int event)
66 switch (event) {
67 case PM_EVENT_SUSPEND:
68 return "suspend";
69 case PM_EVENT_RESUME:
70 return "resume";
71 case PM_EVENT_FREEZE:
72 return "freeze";
73 case PM_EVENT_QUIESCE:
74 return "quiesce";
75 case PM_EVENT_HIBERNATE:
76 return "hibernate";
77 case PM_EVENT_THAW:
78 return "thaw";
79 case PM_EVENT_RESTORE:
80 return "restore";
81 case PM_EVENT_RECOVER:
82 return "recover";
83 default:
84 return "(unknown PM event)";
88 /**
89 * device_pm_sleep_init - Initialize system suspend-related device fields.
90 * @dev: Device object being initialized.
92 void device_pm_sleep_init(struct device *dev)
94 dev->power.is_prepared = false;
95 dev->power.is_suspended = false;
96 dev->power.is_noirq_suspended = false;
97 dev->power.is_late_suspended = false;
98 init_completion(&dev->power.completion);
99 complete_all(&dev->power.completion);
100 dev->power.wakeup = NULL;
101 INIT_LIST_HEAD(&dev->power.entry);
105 * device_pm_lock - Lock the list of active devices used by the PM core.
107 void device_pm_lock(void)
109 mutex_lock(&dpm_list_mtx);
113 * device_pm_unlock - Unlock the list of active devices used by the PM core.
115 void device_pm_unlock(void)
117 mutex_unlock(&dpm_list_mtx);
121 * device_pm_add - Add a device to the PM core's list of active devices.
122 * @dev: Device to add to the list.
124 void device_pm_add(struct device *dev)
126 pr_debug("PM: Adding info for %s:%s\n",
127 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
128 device_pm_check_callbacks(dev);
129 mutex_lock(&dpm_list_mtx);
130 if (dev->parent && dev->parent->power.is_prepared)
131 dev_warn(dev, "parent %s should not be sleeping\n",
132 dev_name(dev->parent));
133 list_add_tail(&dev->power.entry, &dpm_list);
134 dev->power.in_dpm_list = true;
135 mutex_unlock(&dpm_list_mtx);
139 * device_pm_remove - Remove a device from the PM core's list of active devices.
140 * @dev: Device to be removed from the list.
142 void device_pm_remove(struct device *dev)
144 pr_debug("PM: Removing info for %s:%s\n",
145 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
146 complete_all(&dev->power.completion);
147 mutex_lock(&dpm_list_mtx);
148 list_del_init(&dev->power.entry);
149 dev->power.in_dpm_list = false;
150 mutex_unlock(&dpm_list_mtx);
151 device_wakeup_disable(dev);
152 pm_runtime_remove(dev);
153 device_pm_check_callbacks(dev);
157 * device_pm_move_before - Move device in the PM core's list of active devices.
158 * @deva: Device to move in dpm_list.
159 * @devb: Device @deva should come before.
161 void device_pm_move_before(struct device *deva, struct device *devb)
163 pr_debug("PM: Moving %s:%s before %s:%s\n",
164 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
165 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
166 /* Delete deva from dpm_list and reinsert before devb. */
167 list_move_tail(&deva->power.entry, &devb->power.entry);
171 * device_pm_move_after - Move device in the PM core's list of active devices.
172 * @deva: Device to move in dpm_list.
173 * @devb: Device @deva should come after.
175 void device_pm_move_after(struct device *deva, struct device *devb)
177 pr_debug("PM: Moving %s:%s after %s:%s\n",
178 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
179 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
180 /* Delete deva from dpm_list and reinsert after devb. */
181 list_move(&deva->power.entry, &devb->power.entry);
185 * device_pm_move_last - Move device to end of the PM core's list of devices.
186 * @dev: Device to move in dpm_list.
188 void device_pm_move_last(struct device *dev)
190 pr_debug("PM: Moving %s:%s to end of list\n",
191 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
192 list_move_tail(&dev->power.entry, &dpm_list);
195 static ktime_t initcall_debug_start(struct device *dev, void *cb)
197 if (!pm_print_times_enabled)
198 return 0;
200 dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
201 task_pid_nr(current),
202 dev->parent ? dev_name(dev->parent) : "none");
203 return ktime_get();
206 static void initcall_debug_report(struct device *dev, ktime_t calltime,
207 void *cb, int error)
209 ktime_t rettime;
210 s64 nsecs;
212 if (!pm_print_times_enabled)
213 return;
215 rettime = ktime_get();
216 nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
218 dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
219 (unsigned long long)nsecs >> 10);
223 * dpm_wait - Wait for a PM operation to complete.
224 * @dev: Device to wait for.
225 * @async: If unset, wait only if the device's power.async_suspend flag is set.
227 static void dpm_wait(struct device *dev, bool async)
229 if (!dev)
230 return;
232 if (async || (pm_async_enabled && dev->power.async_suspend))
233 wait_for_completion(&dev->power.completion);
236 static int dpm_wait_fn(struct device *dev, void *async_ptr)
238 dpm_wait(dev, *((bool *)async_ptr));
239 return 0;
242 static void dpm_wait_for_children(struct device *dev, bool async)
244 device_for_each_child(dev, &async, dpm_wait_fn);
247 static void dpm_wait_for_suppliers(struct device *dev, bool async)
249 struct device_link *link;
250 int idx;
252 idx = device_links_read_lock();
255 * If the supplier goes away right after we've checked the link to it,
256 * we'll wait for its completion to change the state, but that's fine,
257 * because the only things that will block as a result are the SRCU
258 * callbacks freeing the link objects for the links in the list we're
259 * walking.
261 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
262 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
263 dpm_wait(link->supplier, async);
265 device_links_read_unlock(idx);
268 static void dpm_wait_for_superior(struct device *dev, bool async)
270 dpm_wait(dev->parent, async);
271 dpm_wait_for_suppliers(dev, async);
274 static void dpm_wait_for_consumers(struct device *dev, bool async)
276 struct device_link *link;
277 int idx;
279 idx = device_links_read_lock();
282 * The status of a device link can only be changed from "dormant" by a
283 * probe, but that cannot happen during system suspend/resume. In
284 * theory it can change to "dormant" at that time, but then it is
285 * reasonable to wait for the target device anyway (e.g. if it goes
286 * away, it's better to wait for it to go away completely and then
287 * continue instead of trying to continue in parallel with its
288 * unregistration).
290 list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
291 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
292 dpm_wait(link->consumer, async);
294 device_links_read_unlock(idx);
297 static void dpm_wait_for_subordinate(struct device *dev, bool async)
299 dpm_wait_for_children(dev, async);
300 dpm_wait_for_consumers(dev, async);
304 * pm_op - Return the PM operation appropriate for given PM event.
305 * @ops: PM operations to choose from.
306 * @state: PM transition of the system being carried out.
308 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
310 switch (state.event) {
311 #ifdef CONFIG_SUSPEND
312 case PM_EVENT_SUSPEND:
313 return ops->suspend;
314 case PM_EVENT_RESUME:
315 return ops->resume;
316 #endif /* CONFIG_SUSPEND */
317 #ifdef CONFIG_HIBERNATE_CALLBACKS
318 case PM_EVENT_FREEZE:
319 case PM_EVENT_QUIESCE:
320 return ops->freeze;
321 case PM_EVENT_HIBERNATE:
322 return ops->poweroff;
323 case PM_EVENT_THAW:
324 case PM_EVENT_RECOVER:
325 return ops->thaw;
327 case PM_EVENT_RESTORE:
328 return ops->restore;
329 #endif /* CONFIG_HIBERNATE_CALLBACKS */
332 return NULL;
336 * pm_late_early_op - Return the PM operation appropriate for given PM event.
337 * @ops: PM operations to choose from.
338 * @state: PM transition of the system being carried out.
340 * Runtime PM is disabled for @dev while this function is being executed.
342 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
343 pm_message_t state)
345 switch (state.event) {
346 #ifdef CONFIG_SUSPEND
347 case PM_EVENT_SUSPEND:
348 return ops->suspend_late;
349 case PM_EVENT_RESUME:
350 return ops->resume_early;
351 #endif /* CONFIG_SUSPEND */
352 #ifdef CONFIG_HIBERNATE_CALLBACKS
353 case PM_EVENT_FREEZE:
354 case PM_EVENT_QUIESCE:
355 return ops->freeze_late;
356 case PM_EVENT_HIBERNATE:
357 return ops->poweroff_late;
358 case PM_EVENT_THAW:
359 case PM_EVENT_RECOVER:
360 return ops->thaw_early;
361 case PM_EVENT_RESTORE:
362 return ops->restore_early;
363 #endif /* CONFIG_HIBERNATE_CALLBACKS */
366 return NULL;
370 * pm_noirq_op - Return the PM operation appropriate for given PM event.
371 * @ops: PM operations to choose from.
372 * @state: PM transition of the system being carried out.
374 * The driver of @dev will not receive interrupts while this function is being
375 * executed.
377 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
379 switch (state.event) {
380 #ifdef CONFIG_SUSPEND
381 case PM_EVENT_SUSPEND:
382 return ops->suspend_noirq;
383 case PM_EVENT_RESUME:
384 return ops->resume_noirq;
385 #endif /* CONFIG_SUSPEND */
386 #ifdef CONFIG_HIBERNATE_CALLBACKS
387 case PM_EVENT_FREEZE:
388 case PM_EVENT_QUIESCE:
389 return ops->freeze_noirq;
390 case PM_EVENT_HIBERNATE:
391 return ops->poweroff_noirq;
392 case PM_EVENT_THAW:
393 case PM_EVENT_RECOVER:
394 return ops->thaw_noirq;
395 case PM_EVENT_RESTORE:
396 return ops->restore_noirq;
397 #endif /* CONFIG_HIBERNATE_CALLBACKS */
400 return NULL;
403 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
405 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
406 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
407 ", may wakeup" : "");
410 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
411 int error)
413 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
414 dev_name(dev), pm_verb(state.event), info, error);
417 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
418 const char *info)
420 ktime_t calltime;
421 u64 usecs64;
422 int usecs;
424 calltime = ktime_get();
425 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
426 do_div(usecs64, NSEC_PER_USEC);
427 usecs = usecs64;
428 if (usecs == 0)
429 usecs = 1;
431 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
432 info ?: "", info ? " " : "", pm_verb(state.event),
433 error ? "aborted" : "complete",
434 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
437 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
438 pm_message_t state, const char *info)
440 ktime_t calltime;
441 int error;
443 if (!cb)
444 return 0;
446 calltime = initcall_debug_start(dev, cb);
448 pm_dev_dbg(dev, state, info);
449 trace_device_pm_callback_start(dev, info, state.event);
450 error = cb(dev);
451 trace_device_pm_callback_end(dev, error);
452 suspend_report_result(cb, error);
454 initcall_debug_report(dev, calltime, cb, error);
456 return error;
459 #ifdef CONFIG_DPM_WATCHDOG
460 struct dpm_watchdog {
461 struct device *dev;
462 struct task_struct *tsk;
463 struct timer_list timer;
466 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
467 struct dpm_watchdog wd
470 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
471 * @data: Watchdog object address.
473 * Called when a driver has timed out suspending or resuming.
474 * There's not much we can do here to recover so panic() to
475 * capture a crash-dump in pstore.
477 static void dpm_watchdog_handler(struct timer_list *t)
479 struct dpm_watchdog *wd = from_timer(wd, t, timer);
481 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
482 show_stack(wd->tsk, NULL);
483 panic("%s %s: unrecoverable failure\n",
484 dev_driver_string(wd->dev), dev_name(wd->dev));
488 * dpm_watchdog_set - Enable pm watchdog for given device.
489 * @wd: Watchdog. Must be allocated on the stack.
490 * @dev: Device to handle.
492 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
494 struct timer_list *timer = &wd->timer;
496 wd->dev = dev;
497 wd->tsk = current;
499 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
500 /* use same timeout value for both suspend and resume */
501 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
502 add_timer(timer);
506 * dpm_watchdog_clear - Disable suspend/resume watchdog.
507 * @wd: Watchdog to disable.
509 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
511 struct timer_list *timer = &wd->timer;
513 del_timer_sync(timer);
514 destroy_timer_on_stack(timer);
516 #else
517 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
518 #define dpm_watchdog_set(x, y)
519 #define dpm_watchdog_clear(x)
520 #endif
522 /*------------------------- Resume routines -------------------------*/
525 * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
526 * @dev: Target device.
528 * Make the core skip the "early resume" and "resume" phases for @dev.
530 * This function can be called by middle-layer code during the "noirq" phase of
531 * system resume if necessary, but not by device drivers.
533 void dev_pm_skip_next_resume_phases(struct device *dev)
535 dev->power.is_late_suspended = false;
536 dev->power.is_suspended = false;
540 * suspend_event - Return a "suspend" message for given "resume" one.
541 * @resume_msg: PM message representing a system-wide resume transition.
543 static pm_message_t suspend_event(pm_message_t resume_msg)
545 switch (resume_msg.event) {
546 case PM_EVENT_RESUME:
547 return PMSG_SUSPEND;
548 case PM_EVENT_THAW:
549 case PM_EVENT_RESTORE:
550 return PMSG_FREEZE;
551 case PM_EVENT_RECOVER:
552 return PMSG_HIBERNATE;
554 return PMSG_ON;
558 * dev_pm_may_skip_resume - System-wide device resume optimization check.
559 * @dev: Target device.
561 * Checks whether or not the device may be left in suspend after a system-wide
562 * transition to the working state.
564 bool dev_pm_may_skip_resume(struct device *dev)
566 return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
569 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
570 pm_message_t state,
571 const char **info_p)
573 pm_callback_t callback;
574 const char *info;
576 if (dev->pm_domain) {
577 info = "noirq power domain ";
578 callback = pm_noirq_op(&dev->pm_domain->ops, state);
579 } else if (dev->type && dev->type->pm) {
580 info = "noirq type ";
581 callback = pm_noirq_op(dev->type->pm, state);
582 } else if (dev->class && dev->class->pm) {
583 info = "noirq class ";
584 callback = pm_noirq_op(dev->class->pm, state);
585 } else if (dev->bus && dev->bus->pm) {
586 info = "noirq bus ";
587 callback = pm_noirq_op(dev->bus->pm, state);
588 } else {
589 return NULL;
592 if (info_p)
593 *info_p = info;
595 return callback;
598 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
599 pm_message_t state,
600 const char **info_p);
602 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
603 pm_message_t state,
604 const char **info_p);
607 * device_resume_noirq - Execute a "noirq resume" callback for given device.
608 * @dev: Device to handle.
609 * @state: PM transition of the system being carried out.
610 * @async: If true, the device is being resumed asynchronously.
612 * The driver of @dev will not receive interrupts while this function is being
613 * executed.
615 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
617 pm_callback_t callback;
618 const char *info;
619 bool skip_resume;
620 int error = 0;
622 TRACE_DEVICE(dev);
623 TRACE_RESUME(0);
625 if (dev->power.syscore || dev->power.direct_complete)
626 goto Out;
628 if (!dev->power.is_noirq_suspended)
629 goto Out;
631 dpm_wait_for_superior(dev, async);
633 skip_resume = dev_pm_may_skip_resume(dev);
635 callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
636 if (callback)
637 goto Run;
639 if (skip_resume)
640 goto Skip;
642 if (dev_pm_smart_suspend_and_suspended(dev)) {
643 pm_message_t suspend_msg = suspend_event(state);
646 * If "freeze" callbacks have been skipped during a transition
647 * related to hibernation, the subsequent "thaw" callbacks must
648 * be skipped too or bad things may happen. Otherwise, resume
649 * callbacks are going to be run for the device, so its runtime
650 * PM status must be changed to reflect the new state after the
651 * transition under way.
653 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
654 !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
655 if (state.event == PM_EVENT_THAW) {
656 skip_resume = true;
657 goto Skip;
658 } else {
659 pm_runtime_set_active(dev);
664 if (dev->driver && dev->driver->pm) {
665 info = "noirq driver ";
666 callback = pm_noirq_op(dev->driver->pm, state);
669 Run:
670 error = dpm_run_callback(callback, dev, state, info);
672 Skip:
673 dev->power.is_noirq_suspended = false;
675 if (skip_resume) {
677 * The device is going to be left in suspend, but it might not
678 * have been in runtime suspend before the system suspended, so
679 * its runtime PM status needs to be updated to avoid confusing
680 * the runtime PM framework when runtime PM is enabled for the
681 * device again.
683 pm_runtime_set_suspended(dev);
684 dev_pm_skip_next_resume_phases(dev);
687 Out:
688 complete_all(&dev->power.completion);
689 TRACE_RESUME(error);
690 return error;
693 static bool is_async(struct device *dev)
695 return dev->power.async_suspend && pm_async_enabled
696 && !pm_trace_is_enabled();
699 static void async_resume_noirq(void *data, async_cookie_t cookie)
701 struct device *dev = (struct device *)data;
702 int error;
704 error = device_resume_noirq(dev, pm_transition, true);
705 if (error)
706 pm_dev_err(dev, pm_transition, " async", error);
708 put_device(dev);
711 void dpm_noirq_resume_devices(pm_message_t state)
713 struct device *dev;
714 ktime_t starttime = ktime_get();
716 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
717 mutex_lock(&dpm_list_mtx);
718 pm_transition = state;
721 * Advance the async threads upfront,
722 * in case the starting of async threads is
723 * delayed by non-async resuming devices.
725 list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
726 reinit_completion(&dev->power.completion);
727 if (is_async(dev)) {
728 get_device(dev);
729 async_schedule(async_resume_noirq, dev);
733 while (!list_empty(&dpm_noirq_list)) {
734 dev = to_device(dpm_noirq_list.next);
735 get_device(dev);
736 list_move_tail(&dev->power.entry, &dpm_late_early_list);
737 mutex_unlock(&dpm_list_mtx);
739 if (!is_async(dev)) {
740 int error;
742 error = device_resume_noirq(dev, state, false);
743 if (error) {
744 suspend_stats.failed_resume_noirq++;
745 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
746 dpm_save_failed_dev(dev_name(dev));
747 pm_dev_err(dev, state, " noirq", error);
751 mutex_lock(&dpm_list_mtx);
752 put_device(dev);
754 mutex_unlock(&dpm_list_mtx);
755 async_synchronize_full();
756 dpm_show_time(starttime, state, 0, "noirq");
757 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
760 void dpm_noirq_end(void)
762 resume_device_irqs();
763 device_wakeup_disarm_wake_irqs();
764 cpuidle_resume();
768 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
769 * @state: PM transition of the system being carried out.
771 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
772 * allow device drivers' interrupt handlers to be called.
774 void dpm_resume_noirq(pm_message_t state)
776 dpm_noirq_resume_devices(state);
777 dpm_noirq_end();
780 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
781 pm_message_t state,
782 const char **info_p)
784 pm_callback_t callback;
785 const char *info;
787 if (dev->pm_domain) {
788 info = "early power domain ";
789 callback = pm_late_early_op(&dev->pm_domain->ops, state);
790 } else if (dev->type && dev->type->pm) {
791 info = "early type ";
792 callback = pm_late_early_op(dev->type->pm, state);
793 } else if (dev->class && dev->class->pm) {
794 info = "early class ";
795 callback = pm_late_early_op(dev->class->pm, state);
796 } else if (dev->bus && dev->bus->pm) {
797 info = "early bus ";
798 callback = pm_late_early_op(dev->bus->pm, state);
799 } else {
800 return NULL;
803 if (info_p)
804 *info_p = info;
806 return callback;
810 * device_resume_early - Execute an "early resume" callback for given device.
811 * @dev: Device to handle.
812 * @state: PM transition of the system being carried out.
813 * @async: If true, the device is being resumed asynchronously.
815 * Runtime PM is disabled for @dev while this function is being executed.
817 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
819 pm_callback_t callback;
820 const char *info;
821 int error = 0;
823 TRACE_DEVICE(dev);
824 TRACE_RESUME(0);
826 if (dev->power.syscore || dev->power.direct_complete)
827 goto Out;
829 if (!dev->power.is_late_suspended)
830 goto Out;
832 dpm_wait_for_superior(dev, async);
834 callback = dpm_subsys_resume_early_cb(dev, state, &info);
836 if (!callback && dev->driver && dev->driver->pm) {
837 info = "early driver ";
838 callback = pm_late_early_op(dev->driver->pm, state);
841 error = dpm_run_callback(callback, dev, state, info);
842 dev->power.is_late_suspended = false;
844 Out:
845 TRACE_RESUME(error);
847 pm_runtime_enable(dev);
848 complete_all(&dev->power.completion);
849 return error;
852 static void async_resume_early(void *data, async_cookie_t cookie)
854 struct device *dev = (struct device *)data;
855 int error;
857 error = device_resume_early(dev, pm_transition, true);
858 if (error)
859 pm_dev_err(dev, pm_transition, " async", error);
861 put_device(dev);
865 * dpm_resume_early - Execute "early resume" callbacks for all devices.
866 * @state: PM transition of the system being carried out.
868 void dpm_resume_early(pm_message_t state)
870 struct device *dev;
871 ktime_t starttime = ktime_get();
873 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
874 mutex_lock(&dpm_list_mtx);
875 pm_transition = state;
878 * Advance the async threads upfront,
879 * in case the starting of async threads is
880 * delayed by non-async resuming devices.
882 list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
883 reinit_completion(&dev->power.completion);
884 if (is_async(dev)) {
885 get_device(dev);
886 async_schedule(async_resume_early, dev);
890 while (!list_empty(&dpm_late_early_list)) {
891 dev = to_device(dpm_late_early_list.next);
892 get_device(dev);
893 list_move_tail(&dev->power.entry, &dpm_suspended_list);
894 mutex_unlock(&dpm_list_mtx);
896 if (!is_async(dev)) {
897 int error;
899 error = device_resume_early(dev, state, false);
900 if (error) {
901 suspend_stats.failed_resume_early++;
902 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
903 dpm_save_failed_dev(dev_name(dev));
904 pm_dev_err(dev, state, " early", error);
907 mutex_lock(&dpm_list_mtx);
908 put_device(dev);
910 mutex_unlock(&dpm_list_mtx);
911 async_synchronize_full();
912 dpm_show_time(starttime, state, 0, "early");
913 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
917 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
918 * @state: PM transition of the system being carried out.
920 void dpm_resume_start(pm_message_t state)
922 dpm_resume_noirq(state);
923 dpm_resume_early(state);
925 EXPORT_SYMBOL_GPL(dpm_resume_start);
928 * device_resume - Execute "resume" callbacks for given device.
929 * @dev: Device to handle.
930 * @state: PM transition of the system being carried out.
931 * @async: If true, the device is being resumed asynchronously.
933 static int device_resume(struct device *dev, pm_message_t state, bool async)
935 pm_callback_t callback = NULL;
936 const char *info = NULL;
937 int error = 0;
938 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
940 TRACE_DEVICE(dev);
941 TRACE_RESUME(0);
943 if (dev->power.syscore)
944 goto Complete;
946 if (dev->power.direct_complete) {
947 /* Match the pm_runtime_disable() in __device_suspend(). */
948 pm_runtime_enable(dev);
949 goto Complete;
952 dpm_wait_for_superior(dev, async);
953 dpm_watchdog_set(&wd, dev);
954 device_lock(dev);
957 * This is a fib. But we'll allow new children to be added below
958 * a resumed device, even if the device hasn't been completed yet.
960 dev->power.is_prepared = false;
962 if (!dev->power.is_suspended)
963 goto Unlock;
965 if (dev->pm_domain) {
966 info = "power domain ";
967 callback = pm_op(&dev->pm_domain->ops, state);
968 goto Driver;
971 if (dev->type && dev->type->pm) {
972 info = "type ";
973 callback = pm_op(dev->type->pm, state);
974 goto Driver;
977 if (dev->class && dev->class->pm) {
978 info = "class ";
979 callback = pm_op(dev->class->pm, state);
980 goto Driver;
983 if (dev->bus) {
984 if (dev->bus->pm) {
985 info = "bus ";
986 callback = pm_op(dev->bus->pm, state);
987 } else if (dev->bus->resume) {
988 info = "legacy bus ";
989 callback = dev->bus->resume;
990 goto End;
994 Driver:
995 if (!callback && dev->driver && dev->driver->pm) {
996 info = "driver ";
997 callback = pm_op(dev->driver->pm, state);
1000 End:
1001 error = dpm_run_callback(callback, dev, state, info);
1002 dev->power.is_suspended = false;
1004 Unlock:
1005 device_unlock(dev);
1006 dpm_watchdog_clear(&wd);
1008 Complete:
1009 complete_all(&dev->power.completion);
1011 TRACE_RESUME(error);
1013 return error;
1016 static void async_resume(void *data, async_cookie_t cookie)
1018 struct device *dev = (struct device *)data;
1019 int error;
1021 error = device_resume(dev, pm_transition, true);
1022 if (error)
1023 pm_dev_err(dev, pm_transition, " async", error);
1024 put_device(dev);
1028 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1029 * @state: PM transition of the system being carried out.
1031 * Execute the appropriate "resume" callback for all devices whose status
1032 * indicates that they are suspended.
1034 void dpm_resume(pm_message_t state)
1036 struct device *dev;
1037 ktime_t starttime = ktime_get();
1039 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1040 might_sleep();
1042 mutex_lock(&dpm_list_mtx);
1043 pm_transition = state;
1044 async_error = 0;
1046 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1047 reinit_completion(&dev->power.completion);
1048 if (is_async(dev)) {
1049 get_device(dev);
1050 async_schedule(async_resume, dev);
1054 while (!list_empty(&dpm_suspended_list)) {
1055 dev = to_device(dpm_suspended_list.next);
1056 get_device(dev);
1057 if (!is_async(dev)) {
1058 int error;
1060 mutex_unlock(&dpm_list_mtx);
1062 error = device_resume(dev, state, false);
1063 if (error) {
1064 suspend_stats.failed_resume++;
1065 dpm_save_failed_step(SUSPEND_RESUME);
1066 dpm_save_failed_dev(dev_name(dev));
1067 pm_dev_err(dev, state, "", error);
1070 mutex_lock(&dpm_list_mtx);
1072 if (!list_empty(&dev->power.entry))
1073 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1074 put_device(dev);
1076 mutex_unlock(&dpm_list_mtx);
1077 async_synchronize_full();
1078 dpm_show_time(starttime, state, 0, NULL);
1080 cpufreq_resume();
1081 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1085 * device_complete - Complete a PM transition for given device.
1086 * @dev: Device to handle.
1087 * @state: PM transition of the system being carried out.
1089 static void device_complete(struct device *dev, pm_message_t state)
1091 void (*callback)(struct device *) = NULL;
1092 const char *info = NULL;
1094 if (dev->power.syscore)
1095 return;
1097 device_lock(dev);
1099 if (dev->pm_domain) {
1100 info = "completing power domain ";
1101 callback = dev->pm_domain->ops.complete;
1102 } else if (dev->type && dev->type->pm) {
1103 info = "completing type ";
1104 callback = dev->type->pm->complete;
1105 } else if (dev->class && dev->class->pm) {
1106 info = "completing class ";
1107 callback = dev->class->pm->complete;
1108 } else if (dev->bus && dev->bus->pm) {
1109 info = "completing bus ";
1110 callback = dev->bus->pm->complete;
1113 if (!callback && dev->driver && dev->driver->pm) {
1114 info = "completing driver ";
1115 callback = dev->driver->pm->complete;
1118 if (callback) {
1119 pm_dev_dbg(dev, state, info);
1120 callback(dev);
1123 device_unlock(dev);
1125 pm_runtime_put(dev);
1129 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1130 * @state: PM transition of the system being carried out.
1132 * Execute the ->complete() callbacks for all devices whose PM status is not
1133 * DPM_ON (this allows new devices to be registered).
1135 void dpm_complete(pm_message_t state)
1137 struct list_head list;
1139 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1140 might_sleep();
1142 INIT_LIST_HEAD(&list);
1143 mutex_lock(&dpm_list_mtx);
1144 while (!list_empty(&dpm_prepared_list)) {
1145 struct device *dev = to_device(dpm_prepared_list.prev);
1147 get_device(dev);
1148 dev->power.is_prepared = false;
1149 list_move(&dev->power.entry, &list);
1150 mutex_unlock(&dpm_list_mtx);
1152 trace_device_pm_callback_start(dev, "", state.event);
1153 device_complete(dev, state);
1154 trace_device_pm_callback_end(dev, 0);
1156 mutex_lock(&dpm_list_mtx);
1157 put_device(dev);
1159 list_splice(&list, &dpm_list);
1160 mutex_unlock(&dpm_list_mtx);
1162 /* Allow device probing and trigger re-probing of deferred devices */
1163 device_unblock_probing();
1164 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1168 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1169 * @state: PM transition of the system being carried out.
1171 * Execute "resume" callbacks for all devices and complete the PM transition of
1172 * the system.
1174 void dpm_resume_end(pm_message_t state)
1176 dpm_resume(state);
1177 dpm_complete(state);
1179 EXPORT_SYMBOL_GPL(dpm_resume_end);
1182 /*------------------------- Suspend routines -------------------------*/
1185 * resume_event - Return a "resume" message for given "suspend" sleep state.
1186 * @sleep_state: PM message representing a sleep state.
1188 * Return a PM message representing the resume event corresponding to given
1189 * sleep state.
1191 static pm_message_t resume_event(pm_message_t sleep_state)
1193 switch (sleep_state.event) {
1194 case PM_EVENT_SUSPEND:
1195 return PMSG_RESUME;
1196 case PM_EVENT_FREEZE:
1197 case PM_EVENT_QUIESCE:
1198 return PMSG_RECOVER;
1199 case PM_EVENT_HIBERNATE:
1200 return PMSG_RESTORE;
1202 return PMSG_ON;
1205 static void dpm_superior_set_must_resume(struct device *dev)
1207 struct device_link *link;
1208 int idx;
1210 if (dev->parent)
1211 dev->parent->power.must_resume = true;
1213 idx = device_links_read_lock();
1215 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1216 link->supplier->power.must_resume = true;
1218 device_links_read_unlock(idx);
1221 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1222 pm_message_t state,
1223 const char **info_p)
1225 pm_callback_t callback;
1226 const char *info;
1228 if (dev->pm_domain) {
1229 info = "noirq power domain ";
1230 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1231 } else if (dev->type && dev->type->pm) {
1232 info = "noirq type ";
1233 callback = pm_noirq_op(dev->type->pm, state);
1234 } else if (dev->class && dev->class->pm) {
1235 info = "noirq class ";
1236 callback = pm_noirq_op(dev->class->pm, state);
1237 } else if (dev->bus && dev->bus->pm) {
1238 info = "noirq bus ";
1239 callback = pm_noirq_op(dev->bus->pm, state);
1240 } else {
1241 return NULL;
1244 if (info_p)
1245 *info_p = info;
1247 return callback;
1250 static bool device_must_resume(struct device *dev, pm_message_t state,
1251 bool no_subsys_suspend_noirq)
1253 pm_message_t resume_msg = resume_event(state);
1256 * If all of the device driver's "noirq", "late" and "early" callbacks
1257 * are invoked directly by the core, the decision to allow the device to
1258 * stay in suspend can be based on its current runtime PM status and its
1259 * wakeup settings.
1261 if (no_subsys_suspend_noirq &&
1262 !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1263 !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1264 !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1265 return !pm_runtime_status_suspended(dev) &&
1266 (resume_msg.event != PM_EVENT_RESUME ||
1267 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1270 * The only safe strategy here is to require that if the device may not
1271 * be left in suspend, resume callbacks must be invoked for it.
1273 return !dev->power.may_skip_resume;
1277 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1278 * @dev: Device to handle.
1279 * @state: PM transition of the system being carried out.
1280 * @async: If true, the device is being suspended asynchronously.
1282 * The driver of @dev will not receive interrupts while this function is being
1283 * executed.
1285 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1287 pm_callback_t callback;
1288 const char *info;
1289 bool no_subsys_cb = false;
1290 int error = 0;
1292 TRACE_DEVICE(dev);
1293 TRACE_SUSPEND(0);
1295 dpm_wait_for_subordinate(dev, async);
1297 if (async_error)
1298 goto Complete;
1300 if (pm_wakeup_pending()) {
1301 async_error = -EBUSY;
1302 goto Complete;
1305 if (dev->power.syscore || dev->power.direct_complete)
1306 goto Complete;
1308 callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1309 if (callback)
1310 goto Run;
1312 no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1314 if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1315 goto Skip;
1317 if (dev->driver && dev->driver->pm) {
1318 info = "noirq driver ";
1319 callback = pm_noirq_op(dev->driver->pm, state);
1322 Run:
1323 error = dpm_run_callback(callback, dev, state, info);
1324 if (error) {
1325 async_error = error;
1326 goto Complete;
1329 Skip:
1330 dev->power.is_noirq_suspended = true;
1332 if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1333 dev->power.must_resume = dev->power.must_resume ||
1334 atomic_read(&dev->power.usage_count) > 1 ||
1335 device_must_resume(dev, state, no_subsys_cb);
1336 } else {
1337 dev->power.must_resume = true;
1340 if (dev->power.must_resume)
1341 dpm_superior_set_must_resume(dev);
1343 Complete:
1344 complete_all(&dev->power.completion);
1345 TRACE_SUSPEND(error);
1346 return error;
1349 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1351 struct device *dev = (struct device *)data;
1352 int error;
1354 error = __device_suspend_noirq(dev, pm_transition, true);
1355 if (error) {
1356 dpm_save_failed_dev(dev_name(dev));
1357 pm_dev_err(dev, pm_transition, " async", error);
1360 put_device(dev);
1363 static int device_suspend_noirq(struct device *dev)
1365 reinit_completion(&dev->power.completion);
1367 if (is_async(dev)) {
1368 get_device(dev);
1369 async_schedule(async_suspend_noirq, dev);
1370 return 0;
1372 return __device_suspend_noirq(dev, pm_transition, false);
1375 void dpm_noirq_begin(void)
1377 cpuidle_pause();
1378 device_wakeup_arm_wake_irqs();
1379 suspend_device_irqs();
1382 int dpm_noirq_suspend_devices(pm_message_t state)
1384 ktime_t starttime = ktime_get();
1385 int error = 0;
1387 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1388 mutex_lock(&dpm_list_mtx);
1389 pm_transition = state;
1390 async_error = 0;
1392 while (!list_empty(&dpm_late_early_list)) {
1393 struct device *dev = to_device(dpm_late_early_list.prev);
1395 get_device(dev);
1396 mutex_unlock(&dpm_list_mtx);
1398 error = device_suspend_noirq(dev);
1400 mutex_lock(&dpm_list_mtx);
1401 if (error) {
1402 pm_dev_err(dev, state, " noirq", error);
1403 dpm_save_failed_dev(dev_name(dev));
1404 put_device(dev);
1405 break;
1407 if (!list_empty(&dev->power.entry))
1408 list_move(&dev->power.entry, &dpm_noirq_list);
1409 put_device(dev);
1411 if (async_error)
1412 break;
1414 mutex_unlock(&dpm_list_mtx);
1415 async_synchronize_full();
1416 if (!error)
1417 error = async_error;
1419 if (error) {
1420 suspend_stats.failed_suspend_noirq++;
1421 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1423 dpm_show_time(starttime, state, error, "noirq");
1424 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1425 return error;
1429 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1430 * @state: PM transition of the system being carried out.
1432 * Prevent device drivers' interrupt handlers from being called and invoke
1433 * "noirq" suspend callbacks for all non-sysdev devices.
1435 int dpm_suspend_noirq(pm_message_t state)
1437 int ret;
1439 dpm_noirq_begin();
1440 ret = dpm_noirq_suspend_devices(state);
1441 if (ret)
1442 dpm_resume_noirq(resume_event(state));
1444 return ret;
1447 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1449 struct device *parent = dev->parent;
1451 if (!parent)
1452 return;
1454 spin_lock_irq(&parent->power.lock);
1456 if (dev->power.wakeup_path && !parent->power.ignore_children)
1457 parent->power.wakeup_path = true;
1459 spin_unlock_irq(&parent->power.lock);
1462 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1463 pm_message_t state,
1464 const char **info_p)
1466 pm_callback_t callback;
1467 const char *info;
1469 if (dev->pm_domain) {
1470 info = "late power domain ";
1471 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1472 } else if (dev->type && dev->type->pm) {
1473 info = "late type ";
1474 callback = pm_late_early_op(dev->type->pm, state);
1475 } else if (dev->class && dev->class->pm) {
1476 info = "late class ";
1477 callback = pm_late_early_op(dev->class->pm, state);
1478 } else if (dev->bus && dev->bus->pm) {
1479 info = "late bus ";
1480 callback = pm_late_early_op(dev->bus->pm, state);
1481 } else {
1482 return NULL;
1485 if (info_p)
1486 *info_p = info;
1488 return callback;
1492 * __device_suspend_late - Execute a "late suspend" callback for given device.
1493 * @dev: Device to handle.
1494 * @state: PM transition of the system being carried out.
1495 * @async: If true, the device is being suspended asynchronously.
1497 * Runtime PM is disabled for @dev while this function is being executed.
1499 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1501 pm_callback_t callback;
1502 const char *info;
1503 int error = 0;
1505 TRACE_DEVICE(dev);
1506 TRACE_SUSPEND(0);
1508 __pm_runtime_disable(dev, false);
1510 dpm_wait_for_subordinate(dev, async);
1512 if (async_error)
1513 goto Complete;
1515 if (pm_wakeup_pending()) {
1516 async_error = -EBUSY;
1517 goto Complete;
1520 if (dev->power.syscore || dev->power.direct_complete)
1521 goto Complete;
1523 callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1524 if (callback)
1525 goto Run;
1527 if (dev_pm_smart_suspend_and_suspended(dev) &&
1528 !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1529 goto Skip;
1531 if (dev->driver && dev->driver->pm) {
1532 info = "late driver ";
1533 callback = pm_late_early_op(dev->driver->pm, state);
1536 Run:
1537 error = dpm_run_callback(callback, dev, state, info);
1538 if (error) {
1539 async_error = error;
1540 goto Complete;
1542 dpm_propagate_wakeup_to_parent(dev);
1544 Skip:
1545 dev->power.is_late_suspended = true;
1547 Complete:
1548 TRACE_SUSPEND(error);
1549 complete_all(&dev->power.completion);
1550 return error;
1553 static void async_suspend_late(void *data, async_cookie_t cookie)
1555 struct device *dev = (struct device *)data;
1556 int error;
1558 error = __device_suspend_late(dev, pm_transition, true);
1559 if (error) {
1560 dpm_save_failed_dev(dev_name(dev));
1561 pm_dev_err(dev, pm_transition, " async", error);
1563 put_device(dev);
1566 static int device_suspend_late(struct device *dev)
1568 reinit_completion(&dev->power.completion);
1570 if (is_async(dev)) {
1571 get_device(dev);
1572 async_schedule(async_suspend_late, dev);
1573 return 0;
1576 return __device_suspend_late(dev, pm_transition, false);
1580 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1581 * @state: PM transition of the system being carried out.
1583 int dpm_suspend_late(pm_message_t state)
1585 ktime_t starttime = ktime_get();
1586 int error = 0;
1588 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1589 mutex_lock(&dpm_list_mtx);
1590 pm_transition = state;
1591 async_error = 0;
1593 while (!list_empty(&dpm_suspended_list)) {
1594 struct device *dev = to_device(dpm_suspended_list.prev);
1596 get_device(dev);
1597 mutex_unlock(&dpm_list_mtx);
1599 error = device_suspend_late(dev);
1601 mutex_lock(&dpm_list_mtx);
1602 if (!list_empty(&dev->power.entry))
1603 list_move(&dev->power.entry, &dpm_late_early_list);
1605 if (error) {
1606 pm_dev_err(dev, state, " late", error);
1607 dpm_save_failed_dev(dev_name(dev));
1608 put_device(dev);
1609 break;
1611 put_device(dev);
1613 if (async_error)
1614 break;
1616 mutex_unlock(&dpm_list_mtx);
1617 async_synchronize_full();
1618 if (!error)
1619 error = async_error;
1620 if (error) {
1621 suspend_stats.failed_suspend_late++;
1622 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1623 dpm_resume_early(resume_event(state));
1625 dpm_show_time(starttime, state, error, "late");
1626 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1627 return error;
1631 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1632 * @state: PM transition of the system being carried out.
1634 int dpm_suspend_end(pm_message_t state)
1636 int error = dpm_suspend_late(state);
1637 if (error)
1638 return error;
1640 error = dpm_suspend_noirq(state);
1641 if (error) {
1642 dpm_resume_early(resume_event(state));
1643 return error;
1646 return 0;
1648 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1651 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1652 * @dev: Device to suspend.
1653 * @state: PM transition of the system being carried out.
1654 * @cb: Suspend callback to execute.
1655 * @info: string description of caller.
1657 static int legacy_suspend(struct device *dev, pm_message_t state,
1658 int (*cb)(struct device *dev, pm_message_t state),
1659 const char *info)
1661 int error;
1662 ktime_t calltime;
1664 calltime = initcall_debug_start(dev, cb);
1666 trace_device_pm_callback_start(dev, info, state.event);
1667 error = cb(dev, state);
1668 trace_device_pm_callback_end(dev, error);
1669 suspend_report_result(cb, error);
1671 initcall_debug_report(dev, calltime, cb, error);
1673 return error;
1676 static void dpm_clear_superiors_direct_complete(struct device *dev)
1678 struct device_link *link;
1679 int idx;
1681 if (dev->parent) {
1682 spin_lock_irq(&dev->parent->power.lock);
1683 dev->parent->power.direct_complete = false;
1684 spin_unlock_irq(&dev->parent->power.lock);
1687 idx = device_links_read_lock();
1689 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1690 spin_lock_irq(&link->supplier->power.lock);
1691 link->supplier->power.direct_complete = false;
1692 spin_unlock_irq(&link->supplier->power.lock);
1695 device_links_read_unlock(idx);
1699 * __device_suspend - Execute "suspend" callbacks for given device.
1700 * @dev: Device to handle.
1701 * @state: PM transition of the system being carried out.
1702 * @async: If true, the device is being suspended asynchronously.
1704 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1706 pm_callback_t callback = NULL;
1707 const char *info = NULL;
1708 int error = 0;
1709 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1711 TRACE_DEVICE(dev);
1712 TRACE_SUSPEND(0);
1714 dpm_wait_for_subordinate(dev, async);
1716 if (async_error)
1717 goto Complete;
1720 * If a device configured to wake up the system from sleep states
1721 * has been suspended at run time and there's a resume request pending
1722 * for it, this is equivalent to the device signaling wakeup, so the
1723 * system suspend operation should be aborted.
1725 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1726 pm_wakeup_event(dev, 0);
1728 if (pm_wakeup_pending()) {
1729 async_error = -EBUSY;
1730 goto Complete;
1733 if (dev->power.syscore)
1734 goto Complete;
1736 if (dev->power.direct_complete) {
1737 if (pm_runtime_status_suspended(dev)) {
1738 pm_runtime_disable(dev);
1739 if (pm_runtime_status_suspended(dev))
1740 goto Complete;
1742 pm_runtime_enable(dev);
1744 dev->power.direct_complete = false;
1747 dev->power.may_skip_resume = false;
1748 dev->power.must_resume = false;
1750 dpm_watchdog_set(&wd, dev);
1751 device_lock(dev);
1753 if (dev->pm_domain) {
1754 info = "power domain ";
1755 callback = pm_op(&dev->pm_domain->ops, state);
1756 goto Run;
1759 if (dev->type && dev->type->pm) {
1760 info = "type ";
1761 callback = pm_op(dev->type->pm, state);
1762 goto Run;
1765 if (dev->class && dev->class->pm) {
1766 info = "class ";
1767 callback = pm_op(dev->class->pm, state);
1768 goto Run;
1771 if (dev->bus) {
1772 if (dev->bus->pm) {
1773 info = "bus ";
1774 callback = pm_op(dev->bus->pm, state);
1775 } else if (dev->bus->suspend) {
1776 pm_dev_dbg(dev, state, "legacy bus ");
1777 error = legacy_suspend(dev, state, dev->bus->suspend,
1778 "legacy bus ");
1779 goto End;
1783 Run:
1784 if (!callback && dev->driver && dev->driver->pm) {
1785 info = "driver ";
1786 callback = pm_op(dev->driver->pm, state);
1789 error = dpm_run_callback(callback, dev, state, info);
1791 End:
1792 if (!error) {
1793 dev->power.is_suspended = true;
1794 if (device_may_wakeup(dev))
1795 dev->power.wakeup_path = true;
1797 dpm_propagate_wakeup_to_parent(dev);
1798 dpm_clear_superiors_direct_complete(dev);
1801 device_unlock(dev);
1802 dpm_watchdog_clear(&wd);
1804 Complete:
1805 if (error)
1806 async_error = error;
1808 complete_all(&dev->power.completion);
1809 TRACE_SUSPEND(error);
1810 return error;
1813 static void async_suspend(void *data, async_cookie_t cookie)
1815 struct device *dev = (struct device *)data;
1816 int error;
1818 error = __device_suspend(dev, pm_transition, true);
1819 if (error) {
1820 dpm_save_failed_dev(dev_name(dev));
1821 pm_dev_err(dev, pm_transition, " async", error);
1824 put_device(dev);
1827 static int device_suspend(struct device *dev)
1829 reinit_completion(&dev->power.completion);
1831 if (is_async(dev)) {
1832 get_device(dev);
1833 async_schedule(async_suspend, dev);
1834 return 0;
1837 return __device_suspend(dev, pm_transition, false);
1841 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1842 * @state: PM transition of the system being carried out.
1844 int dpm_suspend(pm_message_t state)
1846 ktime_t starttime = ktime_get();
1847 int error = 0;
1849 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1850 might_sleep();
1852 cpufreq_suspend();
1854 mutex_lock(&dpm_list_mtx);
1855 pm_transition = state;
1856 async_error = 0;
1857 while (!list_empty(&dpm_prepared_list)) {
1858 struct device *dev = to_device(dpm_prepared_list.prev);
1860 get_device(dev);
1861 mutex_unlock(&dpm_list_mtx);
1863 error = device_suspend(dev);
1865 mutex_lock(&dpm_list_mtx);
1866 if (error) {
1867 pm_dev_err(dev, state, "", error);
1868 dpm_save_failed_dev(dev_name(dev));
1869 put_device(dev);
1870 break;
1872 if (!list_empty(&dev->power.entry))
1873 list_move(&dev->power.entry, &dpm_suspended_list);
1874 put_device(dev);
1875 if (async_error)
1876 break;
1878 mutex_unlock(&dpm_list_mtx);
1879 async_synchronize_full();
1880 if (!error)
1881 error = async_error;
1882 if (error) {
1883 suspend_stats.failed_suspend++;
1884 dpm_save_failed_step(SUSPEND_SUSPEND);
1886 dpm_show_time(starttime, state, error, NULL);
1887 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1888 return error;
1892 * device_prepare - Prepare a device for system power transition.
1893 * @dev: Device to handle.
1894 * @state: PM transition of the system being carried out.
1896 * Execute the ->prepare() callback(s) for given device. No new children of the
1897 * device may be registered after this function has returned.
1899 static int device_prepare(struct device *dev, pm_message_t state)
1901 int (*callback)(struct device *) = NULL;
1902 int ret = 0;
1904 if (dev->power.syscore)
1905 return 0;
1907 WARN_ON(!pm_runtime_enabled(dev) &&
1908 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1909 DPM_FLAG_LEAVE_SUSPENDED));
1912 * If a device's parent goes into runtime suspend at the wrong time,
1913 * it won't be possible to resume the device. To prevent this we
1914 * block runtime suspend here, during the prepare phase, and allow
1915 * it again during the complete phase.
1917 pm_runtime_get_noresume(dev);
1919 device_lock(dev);
1921 dev->power.wakeup_path = false;
1923 if (dev->power.no_pm_callbacks)
1924 goto unlock;
1926 if (dev->pm_domain)
1927 callback = dev->pm_domain->ops.prepare;
1928 else if (dev->type && dev->type->pm)
1929 callback = dev->type->pm->prepare;
1930 else if (dev->class && dev->class->pm)
1931 callback = dev->class->pm->prepare;
1932 else if (dev->bus && dev->bus->pm)
1933 callback = dev->bus->pm->prepare;
1935 if (!callback && dev->driver && dev->driver->pm)
1936 callback = dev->driver->pm->prepare;
1938 if (callback)
1939 ret = callback(dev);
1941 unlock:
1942 device_unlock(dev);
1944 if (ret < 0) {
1945 suspend_report_result(callback, ret);
1946 pm_runtime_put(dev);
1947 return ret;
1950 * A positive return value from ->prepare() means "this device appears
1951 * to be runtime-suspended and its state is fine, so if it really is
1952 * runtime-suspended, you can leave it in that state provided that you
1953 * will do the same thing with all of its descendants". This only
1954 * applies to suspend transitions, however.
1956 spin_lock_irq(&dev->power.lock);
1957 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1958 ((pm_runtime_suspended(dev) && ret > 0) ||
1959 dev->power.no_pm_callbacks) &&
1960 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1961 spin_unlock_irq(&dev->power.lock);
1962 return 0;
1966 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1967 * @state: PM transition of the system being carried out.
1969 * Execute the ->prepare() callback(s) for all devices.
1971 int dpm_prepare(pm_message_t state)
1973 int error = 0;
1975 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1976 might_sleep();
1979 * Give a chance for the known devices to complete their probes, before
1980 * disabling probing of devices. This sync point is important at least
1981 * at boot time and during hibernation restore.
1983 wait_for_device_probe();
1985 * It is unsafe if probing of devices happens during suspend or
1986 * hibernation, as system behavior will be unpredictable in that case.
1987 * So, prohibit device probing here and defer the probes
1988 * instead. The normal behavior will be restored in dpm_complete().
1990 device_block_probing();
1992 mutex_lock(&dpm_list_mtx);
1993 while (!list_empty(&dpm_list)) {
1994 struct device *dev = to_device(dpm_list.next);
1996 get_device(dev);
1997 mutex_unlock(&dpm_list_mtx);
1999 trace_device_pm_callback_start(dev, "", state.event);
2000 error = device_prepare(dev, state);
2001 trace_device_pm_callback_end(dev, error);
2003 mutex_lock(&dpm_list_mtx);
2004 if (error) {
2005 if (error == -EAGAIN) {
2006 put_device(dev);
2007 error = 0;
2008 continue;
2010 printk(KERN_INFO "PM: Device %s not prepared "
2011 "for power transition: code %d\n",
2012 dev_name(dev), error);
2013 put_device(dev);
2014 break;
2016 dev->power.is_prepared = true;
2017 if (!list_empty(&dev->power.entry))
2018 list_move_tail(&dev->power.entry, &dpm_prepared_list);
2019 put_device(dev);
2021 mutex_unlock(&dpm_list_mtx);
2022 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2023 return error;
2027 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2028 * @state: PM transition of the system being carried out.
2030 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2031 * callbacks for them.
2033 int dpm_suspend_start(pm_message_t state)
2035 int error;
2037 error = dpm_prepare(state);
2038 if (error) {
2039 suspend_stats.failed_prepare++;
2040 dpm_save_failed_step(SUSPEND_PREPARE);
2041 } else
2042 error = dpm_suspend(state);
2043 return error;
2045 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2047 void __suspend_report_result(const char *function, void *fn, int ret)
2049 if (ret)
2050 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
2052 EXPORT_SYMBOL_GPL(__suspend_report_result);
2055 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2056 * @dev: Device to wait for.
2057 * @subordinate: Device that needs to wait for @dev.
2059 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2061 dpm_wait(dev, subordinate->power.async_suspend);
2062 return async_error;
2064 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2067 * dpm_for_each_dev - device iterator.
2068 * @data: data for the callback.
2069 * @fn: function to be called for each device.
2071 * Iterate over devices in dpm_list, and call @fn for each device,
2072 * passing it @data.
2074 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2076 struct device *dev;
2078 if (!fn)
2079 return;
2081 device_pm_lock();
2082 list_for_each_entry(dev, &dpm_list, power.entry)
2083 fn(dev, data);
2084 device_pm_unlock();
2086 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2088 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2090 if (!ops)
2091 return true;
2093 return !ops->prepare &&
2094 !ops->suspend &&
2095 !ops->suspend_late &&
2096 !ops->suspend_noirq &&
2097 !ops->resume_noirq &&
2098 !ops->resume_early &&
2099 !ops->resume &&
2100 !ops->complete;
2103 void device_pm_check_callbacks(struct device *dev)
2105 spin_lock_irq(&dev->power.lock);
2106 dev->power.no_pm_callbacks =
2107 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2108 !dev->bus->suspend && !dev->bus->resume)) &&
2109 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2110 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2111 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2112 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2113 !dev->driver->suspend && !dev->driver->resume));
2114 spin_unlock_irq(&dev->power.lock);
2117 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2119 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2120 pm_runtime_status_suspended(dev);