/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/rwsem.h>
#include <linux/interrupt.h>

#include "../base.h"
#include "power.h"
/*
 * The entries in dpm_list are in depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device semaphore held,
 * we must never try to acquire a device semaphore while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices. Protected by dpm_list_mtx.
 */
static bool transition_started;
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	pm_runtime_init(dev);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
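/*
 * Illustrative sketch, not part of the original file: a hypothetical debug
 * helper showing the intended locking pattern.  Any code that walks dpm_list
 * must hold dpm_list_mtx, which callers outside this file take through
 * device_pm_lock() and device_pm_unlock().
 */
static void __maybe_unused dpm_show_list(void)
{
	struct device *dev;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		pr_debug("PM: %s is on dpm_list\n", kobject_name(&dev->kobj));
	device_pm_unlock();
}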
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road.
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}
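/*
 * Illustrative sketch, not part of the original file: a subsystem that
 * discovers a functional dependency ("consumer needs supplier powered") could
 * reorder dpm_list with the helpers above.  Devices nearer the tail of
 * dpm_list are suspended earlier, so placing the consumer right after its
 * supplier suspends the consumer before the supplier and resumes it after.
 * The helper name and both parameters are hypothetical; the caller must hold
 * the list lock, since device_pm_move_after() does not take it itself.
 */
static void __maybe_unused dpm_reorder_for_dependency(struct device *consumer,
						       struct device *supplier)
{
	device_pm_lock();
	device_pm_move_after(consumer, supplier);
	device_pm_unlock();
}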
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
	return error;
}
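/*
 * Illustrative sketch, not part of the original file: a bus or device driver
 * might provide a dev_pm_ops object like the one below; pm_op() above then
 * invokes the callback that matches the PM event being handled.  All foo_*
 * names are hypothetical.
 */
static int foo_suspend(struct device *dev)
{
	/* Save context and put the hardware into a low-power state. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Restore the context saved by foo_suspend(). */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops __maybe_unused = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
	.freeze		= foo_suspend,
	.thaw		= foo_resume,
	.poweroff	= foo_suspend,
	.restore	= foo_resume,
};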
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
	return error;
}
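/*
 * Illustrative sketch, not part of the original file: the *_noirq callbacks
 * dispatched by pm_noirq_op() run after suspend_device_irqs() (or before
 * resume_device_irqs()), so the device's interrupt handler cannot run
 * concurrently.  The bar_* names are hypothetical.
 */
static int bar_suspend_noirq(struct device *dev)
{
	/* Quiesce interrupt sources; no handler can race with us here. */
	return 0;
}

static int bar_resume_noirq(struct device *dev)
{
	/* Re-arm interrupt sources before handlers are re-enabled. */
	return 0;
}

static const struct dev_pm_ops bar_pm_ops __maybe_unused = {
	.suspend_noirq	= bar_suspend_noirq,
	.resume_noirq	= bar_resume_noirq,
};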
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
/*------------------------- Resume routines -------------------------*/
/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}
	return error;
}
/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static int device_resume(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = dev->bus->resume(dev);
		}
		if (error)
			return error;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		if (error)
			return error;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = dev->class->resume(dev);
		}
	}
	return error;
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		if (dev->power.status >= DPM_OFF) {
			int error;

			dev->power.status = DPM_RESUMING;
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_noidle(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}
	return error;
}
/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static int device_suspend(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = dev->class->suspend(dev, state);
			suspend_report_result(dev->class->suspend, error);
		}
		if (error)
			return error;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		if (error)
			return error;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = dev->bus->suspend(dev, state);
			suspend_report_result(dev->bus->suspend, error);
		}
	}
	return error;
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			break;
		}
		dev->power.status = DPM_OFF;
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			return error;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			return error;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
	return error;
}
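/*
 * Illustrative sketch, not part of the original file: a bus type can pair
 * ->prepare() with ->complete() to block and later re-allow registration of
 * new children around a sleep transition; device_prepare() above calls the
 * former, device_complete() the latter.  The baz_* names are hypothetical.
 */
static int baz_bus_prepare(struct device *dev)
{
	/* Once this returns 0, no new children of @dev may be registered. */
	return 0;
}

static void baz_bus_complete(struct device *dev)
{
	/* The transition is over; child registration may happen again. */
}

static const struct dev_pm_ops baz_bus_pm_ops __maybe_unused = {
	.prepare	= baz_bus_prepare,
	.complete	= baz_bus_complete,
};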
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_noidle(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
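/*
 * Illustrative sketch, not part of the original file: the system sleep core
 * calls the exported entry points of this file in roughly the order below.
 * The function is hypothetical and omits the platform, sysdev and console
 * handling that the real suspend and hibernation paths perform.
 */
static int __maybe_unused example_enter_sleep(pm_message_t state)
{
	int error;

	error = dpm_suspend_start(state);	/* ->prepare() + "suspend" callbacks */
	if (error)
		goto resume;

	error = dpm_suspend_noirq(state);	/* "late suspend", IRQs disabled */
	if (error)
		goto resume;

	/* ... platform code enters the sleep state and later wakes up ... */

	dpm_resume_noirq(resume_event(state));	/* "early resume" */
 resume:
	dpm_resume_end(resume_event(state));	/* "resume" + ->complete() callbacks */
	return error;
}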
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
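/*
 * Illustrative sketch, not part of the original file: drivers normally use
 * the suspend_report_result() macro (which passes __func__ and the callback
 * to __suspend_report_result() above) to log which callback refused a
 * transition.  The qux_* name and the error value are hypothetical.
 */
static int __maybe_unused qux_suspend(struct device *dev, pm_message_t state)
{
	int error = -EBUSY;	/* e.g. the hardware is still busy */

	suspend_report_result(qux_suspend, error);
	return error;
}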