/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval = 0;

        dev_dbg(dev, "__pm_runtime_idle()!\n");

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0
            || dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval)
                goto out;

        if (dev->power.request_pending) {
                /*
                 * If an idle notification request is pending, cancel it.  Any
                 * other pending request takes precedence over us.
                 */
                if (dev->power.request == RPM_REQ_IDLE) {
                        dev->power.request = RPM_REQ_NONE;
                } else if (dev->power.request != RPM_REQ_NONE) {
                        retval = -EAGAIN;
                        goto out;
                }
        }

        dev->power.idle_notification = true;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->bus->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        }

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval);

        return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
        int retval;

        spin_lock_irq(&dev->power.lock);
        retval = __pm_runtime_idle(dev);
        spin_unlock_irq(&dev->power.lock);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);

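/*
 * Illustrative usage sketch, not part of the original file: the
 * ->runtime_idle() callback invoked by __pm_runtime_idle() above is expected
 * to decide whether the device should actually be suspended.  A minimal bus
 * type implementation might simply suspend the device at once.  The name
 * foo_runtime_idle is hypothetical.
 */
static int __maybe_unused foo_runtime_idle(struct device *dev)
{
        /* Nothing more to check here; suspend the device synchronously. */
        return pm_runtime_suspend(dev);
}
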
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type.  If another suspend has been started earlier, wait
 * for it to finish.  If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        bool notify = false;
        int retval = 0;

        dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
                from_wq ? " from workqueue" : "");

 repeat:
        if (dev->power.runtime_error) {
                retval = -EINVAL;
                goto out;
        }

        /* Pending resume requests take precedence over us. */
        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                retval = -EAGAIN;
                goto out;
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;
        else if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.disable_depth > 0
            || atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval)
                goto out;

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (from_wq) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.runtime_status = RPM_SUSPENDING;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                dev->power.runtime_status = RPM_ACTIVE;
                pm_runtime_cancel_pending(dev);
                dev->power.deferred_resume = false;

                if (retval == -EAGAIN || retval == -EBUSY) {
                        notify = true;
                        dev->power.runtime_error = 0;
                }
        } else {
                dev->power.runtime_status = RPM_SUSPENDED;

                if (dev->parent) {
                        parent = dev->parent;
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                }
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                dev->power.deferred_resume = false;
                __pm_runtime_resume(dev, false);
                retval = -EAGAIN;
                goto out;
        }

        if (notify)
                __pm_runtime_idle(dev);

        if (parent && !parent->power.ignore_children) {
                spin_unlock_irq(&dev->power.lock);

                pm_request_idle(parent);

                spin_lock_irq(&dev->power.lock);
        }

 out:
        dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

        return retval;
}

/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
        int retval;

        spin_lock_irq(&dev->power.lock);
        retval = __pm_runtime_suspend(dev, false);
        spin_unlock_irq(&dev->power.lock);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);

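/*
 * Illustrative sketch, not part of the original file: shape of a bus type's
 * ->runtime_suspend() callback as called by __pm_runtime_suspend() above.
 * Per the error handling above, returning -EBUSY or -EAGAIN rolls the status
 * back to RPM_ACTIVE without setting power.runtime_error, while any other
 * non-zero value is stored there and blocks further run-time PM until the
 * status is reset.  foo_runtime_suspend is hypothetical.
 */
static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
        /* Device-specific quiescing and powering down would go here. */
        return 0;
}
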
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type.  If another resume has been started earlier, wait
 * for it to finish.  If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device.  Cancel any scheduled
 * or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        int retval = 0;

        dev_dbg(dev, "__pm_runtime_resume()%s!\n",
                from_wq ? " from workqueue" : "");

 repeat:
        if (dev->power.runtime_error) {
                retval = -EINVAL;
                goto out;
        }

        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_ACTIVE)
                retval = 1;
        else if (dev->power.disable_depth > 0)
                retval = -EAGAIN;
        if (retval)
                goto out;

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (from_wq) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's resume counter and resume it if
                 * necessary.
                 */
                parent = dev->parent;
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock_irq(&parent->power.lock);
                /*
                 * We can resume if the parent's run-time PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        __pm_runtime_resume(parent, false);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock_irq(&parent->power.lock);

                spin_lock_irq(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }

        dev->power.runtime_status = RPM_RESUMING;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                dev->power.runtime_status = RPM_SUSPENDED;
                pm_runtime_cancel_pending(dev);
        } else {
                dev->power.runtime_status = RPM_ACTIVE;
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                __pm_request_idle(dev);

 out:
        if (parent) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

        return retval;
}

/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
        int retval;

        spin_lock_irq(&dev->power.lock);
        retval = __pm_runtime_resume(dev, false);
        spin_unlock_irq(&dev->power.lock);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);

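/*
 * Illustrative sketch, not part of the original file: the matching
 * ->runtime_resume() callback.  On a zero return __pm_runtime_resume() sets
 * the status to RPM_ACTIVE and increments the parent's child_count; a
 * non-zero return ends up in power.runtime_error.  foo_runtime_resume is
 * hypothetical.
 */
static int __maybe_unused foo_runtime_resume(struct device *dev)
{
        /* Device-specific re-powering and state restoration would go here. */
        return 0;
}
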
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                __pm_runtime_idle(dev);
                break;
        case RPM_REQ_SUSPEND:
                __pm_runtime_suspend(dev, true);
                break;
        case RPM_REQ_RESUME:
                __pm_runtime_resume(dev, true);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0
            || dev->power.runtime_status == RPM_SUSPENDED
            || dev->power.runtime_status == RPM_SUSPENDING)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval)
                return retval;

        if (dev->power.request_pending) {
                /* Any requests other than RPM_REQ_IDLE take precedence. */
                if (dev->power.request == RPM_REQ_NONE)
                        dev->power.request = RPM_REQ_IDLE;
                else if (dev->power.request != RPM_REQ_IDLE)
                        retval = -EAGAIN;
                return retval;
        }

        dev->power.request = RPM_REQ_IDLE;
        dev->power.request_pending = true;
        queue_work(pm_wq, &dev->power.work);

        return 0;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = __pm_request_idle(dev);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);

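/*
 * Illustrative sketch, not part of the original file: pm_request_idle() only
 * takes dev->power.lock with irqsave and queues work on pm_wq, so it may be
 * called from atomic context, e.g. when a transfer completes in an interrupt
 * handler.  foo_transfer_done is hypothetical.
 */
static void __maybe_unused foo_transfer_done(struct device *dev)
{
        /* Safe with interrupts off: the idle notification runs from pm_wq. */
        pm_request_idle(dev);
}
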
/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                return -EINVAL;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0)
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDING)
                retval = -EINPROGRESS;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval < 0)
                return retval;

        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                /*
                 * Pending resume requests take precedence over us, but we can
                 * overtake any other pending request.
                 */
                if (dev->power.request == RPM_REQ_RESUME)
                        retval = -EAGAIN;
                else if (dev->power.request != RPM_REQ_SUSPEND)
                        dev->power.request = retval ?
                                                RPM_REQ_NONE : RPM_REQ_SUSPEND;
                return retval;
        }
        if (retval)
                return retval;

        dev->power.request = RPM_REQ_SUSPEND;
        dev->power.request_pending = true;
        queue_work(pm_wq, &dev->power.work);

        return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                __pm_request_suspend(dev);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval = 0;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.runtime_error) {
                retval = -EINVAL;
                goto out;
        }

        if (!delay) {
                retval = __pm_request_suspend(dev);
                goto out;
        }

        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                /*
                 * Pending resume requests take precedence over us, but any
                 * other pending requests have to be canceled.
                 */
                if (dev->power.request == RPM_REQ_RESUME) {
                        retval = -EAGAIN;
                        goto out;
                }
                dev->power.request = RPM_REQ_NONE;
        }

        if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;
        else if (dev->power.runtime_status == RPM_SUSPENDING)
                retval = -EINPROGRESS;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval)
                goto out;

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

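/*
 * Illustrative sketch, not part of the original file: a driver that wants a
 * grace period before suspending can schedule the request rather than
 * suspending synchronously; a later suspend or resume operation deactivates
 * the timer again.  The 100 ms delay and the foo_* name are hypothetical.
 */
static int __maybe_unused foo_device_idle(struct device *dev)
{
        /* Suspend only if the device stays idle for another 100 ms. */
        return pm_schedule_suspend(dev, 100);
}
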
/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                return -EINVAL;

        if (dev->power.runtime_status == RPM_ACTIVE)
                retval = 1;
        else if (dev->power.runtime_status == RPM_RESUMING)
                retval = -EINPROGRESS;
        else if (dev->power.disable_depth > 0)
                retval = -EAGAIN;
        if (retval < 0)
                return retval;

        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                /* If non-resume request is pending, we can overtake it. */
                dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
                return retval;
        }
        if (retval)
                return retval;

        dev->power.request = RPM_REQ_RESUME;
        dev->power.request_pending = true;
        queue_work(pm_wq, &dev->power.work);

        return 0;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = __pm_request_resume(dev);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);

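/*
 * Illustrative sketch, not part of the original file: like pm_request_idle(),
 * pm_request_resume() defers the actual resume to pm_wq, which makes it
 * suitable for reacting to a wake-up signal noticed with interrupts disabled.
 * foo_handle_wakeup is hypothetical.
 */
static void __maybe_unused foo_handle_wakeup(struct device *dev)
{
        /* Queue a resume request; __pm_runtime_resume() runs from pm_wq. */
        pm_request_resume(dev);
}
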
/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @sync: If set and the device is suspended, resume it synchronously.
 *
 * Increment the usage count of the device and if it was zero previously,
 * resume it or submit a resume request for it, depending on the value of @sync.
 */
int __pm_runtime_get(struct device *dev, bool sync)
{
        int retval = 1;

        if (atomic_add_return(1, &dev->power.usage_count) == 1)
                retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @sync: If the device's bus type is to be notified, do that synchronously.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the value of @sync.
 */
int __pm_runtime_put(struct device *dev, bool sync)
{
        int retval = 0;

        if (atomic_dec_and_test(&dev->power.usage_count))
                retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);

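/*
 * Illustrative sketch, not part of the original file: the usual driver
 * pattern brackets I/O with the pm_runtime_get_sync()/pm_runtime_put()
 * helpers from <linux/pm_runtime.h>, which wrap __pm_runtime_get() and
 * __pm_runtime_put(), so the usage count stays above zero while the device
 * is needed.  foo_do_io is hypothetical.
 */
static void __maybe_unused foo_do_io(struct device *dev)
{
        pm_runtime_get_sync(dev);       /* Resume synchronously if needed. */
        /* ... carry out the I/O while the device is awake ... */
        pm_runtime_put(dev);            /* Queue an idle notification. */
}
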
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It always is possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has run-time PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE) {
                        error = -EBUSY;
                } else {
                        if (dev->power.runtime_status == RPM_SUSPENDED)
                                atomic_inc(&parent->power.child_count);
                }

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        dev->power.runtime_status = status;
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

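/*
 * Illustrative sketch, not part of the original file: a probe routine for
 * hardware that powers up active would use the pm_runtime_set_active()
 * wrapper (__pm_runtime_set_status(dev, RPM_ACTIVE)) before enabling
 * run-time PM, so the core's bookkeeping matches the hardware state.
 * foo_probe is hypothetical.
 */
static int __maybe_unused foo_probe(struct device *dev)
{
        int error;

        /* May fail with -EBUSY if the parent is not active, as described above. */
        error = pm_runtime_set_active(dev);
        if (error)
                return error;

        pm_runtime_enable(dev); /* Balances disable_depth from pm_runtime_init(). */
        return 0;
}
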
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                __pm_runtime_resume(dev, false);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

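/*
 * Illustrative sketch, not part of the original file: before tearing down
 * resources that a run-time PM callback might still touch, a driver can use
 * pm_runtime_barrier() to flush pm_wq and wait for callbacks in flight.
 * foo_teardown is hypothetical.
 */
static void __maybe_unused foo_teardown(struct device *dev)
{
        /* Afterwards no request for @dev is pending or running on pm_wq. */
        pm_runtime_barrier(dev);
        /* ... now it is safe to release resources used by the callbacks ... */
}
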
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling run-time PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                __pm_runtime_resume(dev, false);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

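/*
 * Illustrative sketch, not part of the original file: disable_depth is a
 * counter, so pm_runtime_disable() (a wrapper that calls
 * __pm_runtime_disable() with check_resume set) must be balanced by
 * pm_runtime_enable(), e.g. around a section during which the device's
 * run-time PM status must not change.  foo_reconfigure is hypothetical.
 */
static void __maybe_unused foo_reconfigure(struct device *dev)
{
        pm_runtime_disable(dev);        /* Waits for callbacks in progress. */
        /* ... reconfigure the device with run-time PM frozen ... */
        pm_runtime_enable(dev);         /* Balances the disable above. */
}
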
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        spin_lock_init(&dev->power.lock);

        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
}