/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
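
/*
 * pm_transition and async_error are shared with the async_schedule()
 * workers used below: pm_transition is set under dpm_list_mtx before any
 * workers are scheduled, and async_error records a failure observed by
 * any path so that the remaining suspend work can be short-circuited.
 */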

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}
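
/*
 * The two helpers above only emit timing information when the kernel was
 * booted with the "initcall_debug" parameter, mirroring the initcall
 * timing output.
 */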

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
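
/*
 * Note that a subsystem that leaves a callback above unset is treated as
 * having handled the event successfully: error stays 0, and only an
 * event outside the configured set yields -EINVAL.
 */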

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}
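
/*
 * The ladder above establishes the callback precedence used throughout
 * this file: a PM domain overrides the device type, which overrides the
 * class, which overrides the bus type; only the first match is run.
 */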

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
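
/*
 * A device is handled asynchronously only if its driver opted in (its
 * power.async_suspend flag is set, normally via
 * device_enable_async_suspend()), the global pm_async_enabled switch is
 * on, and PM tracing is off, since the trace mechanism needs a
 * serialized resume order to be meaningful.
 */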

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
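
/*
 * dpm_resume() makes two passes over dpm_suspended_list: the first kicks
 * off async_resume() work for every async-capable device, the second
 * resumes the remaining devices synchronously in list order while the
 * async resumes proceed in parallel; async_synchronize_full() then waits
 * for the stragglers.
 */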

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
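
/*
 * On failure dpm_suspend_noirq() unwinds itself: the devices already
 * moved to dpm_noirq_list are resumed via dpm_resume_noirq() with the
 * matching resume event, so callers only ever see a consistent state.
 */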

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}
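
/*
 * Two side effects of __device_suspend() matter to its callers: a device
 * on a wakeup path propagates power.wakeup_path to its parent (unless
 * the parent ignores its children), and any error is copied into
 * async_error so that both the async workers and dpm_suspend() stop
 * submitting further suspends.
 */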

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}
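
/*
 * Devices are taken from the tail of dpm_prepared_list, so children,
 * which are always added after their parents, are suspended first;
 * resume walks the lists from the head and restores parents first.
 */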

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}
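
/*
 * An -EAGAIN from a ->prepare() callback is treated as transient: the
 * loop above retries the same device instead of failing the transition,
 * on the expectation that the condition clears (for instance once a
 * concurrent removal takes the device off dpm_list). Any other error
 * aborts the whole prepare phase.
 */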

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
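
/*
 * A sketch of how a driver with a cross-device ordering constraint might
 * use the helper above ("peer" is a hypothetical device this driver must
 * wait for during an async suspend):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, peer);
 *
 *		if (error)
 *			return error;
 *		... suspend using resources owned by peer ...
 *		return 0;
 *	}
 */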