/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
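
/*
 * During suspend, each device moves through the lists above in sequence:
 * from dpm_list to dpm_prepared_list after ->prepare(), then to
 * dpm_suspended_list, dpm_late_early_list and dpm_noirq_list after the
 * ->suspend(), ->suspend_late() and ->suspend_noirq() phases respectively.
 * Resume walks the lists back in the opposite direction.
 */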
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}
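
/*
 * Note: both helpers above print only when pm_print_times_enabled is set;
 * that flag is exposed to user space as /sys/power/pm_print_times (see the
 * PM debug attributes in kernel/power/main.c).
 */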
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
/*------------------------- Resume routines -------------------------*/
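
/*
 * Each of the device_* phase functions below picks its callback in the
 * same fixed order: PM domain first, then device type, then class, then
 * bus, with the driver's own dev_pm_ops used only as a fallback.  For
 * illustration, a driver typically hooks into these phases with a sketch
 * like the following (the "foo" names are hypothetical, not part of this
 * file):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = { .name = "foo", .pm = &foo_pm_ops },
 *	};
 */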
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
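
/*
 * Asynchronous operation is also gated on pm_trace_is_enabled() above:
 * the PM trace facility records the last device being handled (via the
 * RTC) to pinpoint resume hangs, which is only meaningful when devices
 * are processed strictly one at a time.
 */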
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: Callback name for debug and trace output.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}
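
/*
 * Any failure recorded in async_error above is picked up both by other
 * in-flight suspends (which bail out early in __device_suspend()) and by
 * dpm_suspend(), which stops processing the list once it is set.
 */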
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
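
/*
 * Taken together, the exported entry points above give the platform
 * suspend/hibernation core its usual phase ordering.  An illustrative
 * sketch (error handling and platform hooks omitted; see
 * kernel/power/suspend.c for the real sequence):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	...
 *	error = dpm_suspend_end(PMSG_SUSPEND);
 *	...	the system sleeps here	...
 *	dpm_resume_start(PMSG_RESUME);
 *	...
 *	dpm_resume_end(PMSG_RESUME);
 */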
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
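
/*
 * device_pm_wait_for_dev() is meant for drivers whose ordering constraints
 * are not expressed by the parent/child tree.  A hypothetical example (the
 * "foo" names are illustrative only, not part of this file):
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *		return foo_hw_init(foo);
 *	}
 */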
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);