/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it. Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_idle(dev);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);

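/*
 * Example (illustrative sketch, not part of this file): a bus type typically
 * implements ->runtime_idle() as a thin policy hook that turns the idle
 * notification into an actual suspend. The "foo" names are hypothetical.
 */
#if 0
static int foo_bus_runtime_idle(struct device *dev)
{
	/* Nothing keeps the device busy, so convert the hint into a suspend. */
	return pm_runtime_suspend(dev);
}
#endif
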
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type. If another suspend has been started earlier, wait
 * for it to finish. If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
		pm_runtime_cancel_pending(dev);
		dev->power.deferred_resume = false;

		if (retval == -EAGAIN || retval == -EBUSY) {
			notify = true;
			dev->power.runtime_error = 0;
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_suspend(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);

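/*
 * Example (hypothetical driver code): a ->runtime_suspend() callback may veto
 * the suspend by returning -EBUSY or -EAGAIN, in which case the core above
 * restores RPM_ACTIVE and clears power.runtime_error.
 */
#if 0
static int foo_runtime_suspend(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	if (foo_transfer_in_progress(foo))
		return -EBUSY;	/* Veto: the core keeps the device active. */

	foo_save_state(foo);
	foo_power_down(foo);
	return 0;
}
#endif
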
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type. If another resume has been started earlier, wait
 * for it to finish. If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device. Cancel any scheduled
 * or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock_irq(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock_irq(&parent->power.lock);

		spin_lock_irq(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_resume(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, true);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, true);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_idle(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);

/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
						RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		__pm_request_suspend(dev);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

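/*
 * Example (hypothetical): rather than suspending as soon as I/O completes, a
 * driver can arm the suspend timer so the device is only suspended after a
 * quiescent period; each call simply reprograms the timer.
 */
#if 0
#define FOO_AUTOSUSPEND_DELAY_MS	2000

static void foo_io_done(struct device *dev)
{
	/* A suspend request will be queued 2 s from now unless rearmed. */
	pm_schedule_suspend(dev, FOO_AUTOSUSPEND_DELAY_MS);
}
#endif
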
/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/* If a non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_resume(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);

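/*
 * Example (hypothetical): pm_request_resume() only queues work on pm_wq and
 * takes dev->power.lock with irqsave, so it can be called from interrupt
 * context, e.g. from a remote wake-up interrupt handler.
 */
#if 0
static irqreturn_t foo_wake_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_request_resume(dev);		/* the resume itself runs from pm_wq */
	return IRQ_HANDLED;
}
#endif
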
/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @sync: If set and the device is suspended, resume it synchronously.
 *
 * Increment the usage count of the device and if it was zero previously,
 * resume it or submit a resume request for it, depending on the value of @sync.
 */
int __pm_runtime_get(struct device *dev, bool sync)
{
	int retval = 1;

	if (atomic_add_return(1, &dev->power.usage_count) == 1)
		retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @sync: If the device's bus type is to be notified, do that synchronously.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the value of @sync.
 */
int __pm_runtime_put(struct device *dev, bool sync)
{
	int retval = 0;

	if (atomic_dec_and_test(&dev->power.usage_count))
		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);

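/*
 * Example (hypothetical): the pm_runtime_get_sync()/pm_runtime_put() helpers
 * from <linux/pm_runtime.h> wrap the two functions above; a driver brackets
 * hardware access with them so the usage count stays non-zero during I/O.
 */
#if 0
static int foo_xfer(struct device *dev, struct foo_msg *msg)
{
	int ret;

	pm_runtime_get_sync(dev);	/* __pm_runtime_get(dev, true) */

	ret = foo_do_xfer(dev, msg);

	pm_runtime_put(dev);		/* __pm_runtime_put(dev, false) */
	return ret;
}
#endif
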
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_irq(&parent->power.lock);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			error = -EBUSY;
		} else {
			if (dev->power.runtime_status == RPM_SUSPENDED)
				atomic_inc(&parent->power.child_count);
		}

		spin_unlock_irq(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

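/*
 * Example (hypothetical probe path): a driver that powers the device up by
 * hand during probe reports that to the core with pm_runtime_set_active(), a
 * wrapper around the function above, before enabling run-time PM.
 */
#if 0
static int foo_probe(struct device *dev)
{
	foo_power_up(dev);

	pm_runtime_set_active(dev);	/* __pm_runtime_set_status(dev, RPM_ACTIVE) */
	pm_runtime_enable(dev);
	return 0;
}
#endif
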
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

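/*
 * Example (hypothetical): code about to tear a device down can use the barrier
 * to guarantee that no run-time PM callback is still running or queued before
 * it proceeds.
 */
#if 0
static void foo_teardown(struct device *dev)
{
	if (pm_runtime_barrier(dev))
		dev_dbg(dev, "woken up by a pending resume request\n");

	foo_quiesce(dev);
}
#endif
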
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

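/*
 * Example (hypothetical): pm_runtime_disable()/pm_runtime_enable() nest via
 * power.disable_depth, so a driver can disable run-time PM around a critical
 * section and restore it afterwards.
 */
#if 0
static void foo_reset(struct device *dev)
{
	pm_runtime_disable(dev);	/* __pm_runtime_disable(dev, true) */

	foo_hard_reset(dev);

	pm_runtime_enable(dev);
}
#endif
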
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	spin_lock_init(&dev->power.lock);

	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}