/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it. Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->type->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->class->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_idle(dev);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);

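/*
 * Usage sketch (hypothetical, not taken from an existing bus type): the
 * counterpart of the notification above is a ->runtime_idle() callback,
 * which decides whether an idle device should really be suspended.  A bus
 * type might simply turn the notification into a delayed suspend request:
 *
 *	static int foo_bus_runtime_idle(struct device *dev)
 *	{
 *		return pm_schedule_suspend(dev, 100);
 *	}
 */
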
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type.  If another suspend has been started earlier, wait
 * for it to finish.  If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;
	dev->power.deferred_resume = false;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
		pm_runtime_cancel_pending(dev);

		if (retval == -EAGAIN || retval == -EBUSY) {
			notify = true;
			dev->power.runtime_error = 0;
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_suspend(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);

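/*
 * Usage sketch (all "foo" names are hypothetical): the ->runtime_suspend()
 * callbacks invoked above come from struct dev_pm_ops.  A driver that only
 * needs to gate a clock might provide something like the code below; note
 * that returning -EBUSY or -EAGAIN from ->runtime_suspend() is treated above
 * as "try again later" rather than as a hard error:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *
 *		if (foo_transfer_in_progress(priv))
 *			return -EBUSY;
 *		foo_gate_clock(priv);
 *		return 0;
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		foo_ungate_clock(dev_get_drvdata(dev));
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.runtime_suspend = foo_runtime_suspend,
 *		.runtime_resume = foo_runtime_resume,
 *	};
 */
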
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type.  If another resume has been started earlier, wait
 * for it to finish.  If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device.  Cancel any scheduled
 * or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_resume(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);

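/*
 * A note on return values, matching the checks in __pm_runtime_resume()
 * above: 1 means the device was already active, 0 means it has just been
 * resumed and a negative value is an error.  A hypothetical caller that only
 * needs the device to be powered can therefore treat any non-negative result
 * as success:
 *
 *	error = pm_runtime_resume(dev);
 *	if (error < 0)
 *		return error;
 */
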
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, true);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, true);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_idle(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);

/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
						RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		__pm_request_suspend(dev);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	if (!dev->power.timer_expires)
		dev->power.timer_expires = 1;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

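/*
 * Usage sketch (hypothetical driver code): a driver whose device tends to
 * stay idle once a transfer completes can queue a suspend request after a
 * debounce period instead of suspending immediately:
 *
 *	static void foo_transfer_done(struct foo *priv)
 *	{
 *		foo_complete_request(priv);
 *		pm_schedule_suspend(priv->dev, 250);
 *	}
 *
 * The "timer_expires = 1" adjustment above keeps the value 0 reserved as the
 * "timer inactive" marker checked by pm_runtime_deactivate_timer(), in case
 * jiffies wraparound makes the computed expiration time exactly 0.
 */
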
/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		dev->power.deferred_resume = true;
		return retval;
	}
	if (dev->power.request_pending) {
		/* If non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	}
	if (retval)
		return retval;

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_resume(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);

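/*
 * Usage sketch (illustrative): unlike pm_runtime_resume(), this helper only
 * queues work on pm_wq and takes dev->power.lock with irqsave, so a
 * hypothetical interrupt handler may use it to wake a device up without
 * blocking:
 *
 *	static irqreturn_t foo_wakeup_irq(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		pm_request_resume(dev);
 *		return IRQ_HANDLED;
 *	}
 */
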
/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @sync: If set and the device is suspended, resume it synchronously.
 *
 * Increment the usage count of the device and resume it or submit a resume
 * request for it, depending on the value of @sync.
 */
int __pm_runtime_get(struct device *dev, bool sync)
{
	int retval;

	atomic_inc(&dev->power.usage_count);
	retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @sync: If the device's bus type is to be notified, do that synchronously.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the value of @sync.
 */
int __pm_runtime_put(struct device *dev, bool sync)
{
	int retval = 0;

	if (atomic_dec_and_test(&dev->power.usage_count))
		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);

);
784 * __pm_runtime_set_status - Set run-time PM status of a device.
785 * @dev: Device to handle.
786 * @status: New run-time PM status of the device.
788 * If run-time PM of the device is disabled or its power.runtime_error field is
789 * different from zero, the status may be changed either to RPM_ACTIVE, or to
790 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
791 * However, if the device has a parent and the parent is not active, and the
792 * parent's power.ignore_children flag is unset, the device's status cannot be
793 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
795 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
796 * and the device parent's counter of unsuspended children is modified to
797 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
798 * notification request for the parent is submitted.
800 int __pm_runtime_set_status(struct device
*dev
, unsigned int status
)
802 struct device
*parent
= dev
->parent
;
804 bool notify_parent
= false;
807 if (status
!= RPM_ACTIVE
&& status
!= RPM_SUSPENDED
)
810 spin_lock_irqsave(&dev
->power
.lock
, flags
);
812 if (!dev
->power
.runtime_error
&& !dev
->power
.disable_depth
) {
817 if (dev
->power
.runtime_status
== status
)
820 if (status
== RPM_SUSPENDED
) {
821 /* It always is possible to set the status to 'suspended'. */
823 atomic_add_unless(&parent
->power
.child_count
, -1, 0);
824 notify_parent
= !parent
->power
.ignore_children
;
830 spin_lock_nested(&parent
->power
.lock
, SINGLE_DEPTH_NESTING
);
833 * It is invalid to put an active child under a parent that is
834 * not active, has run-time PM enabled and the
835 * 'power.ignore_children' flag unset.
837 if (!parent
->power
.disable_depth
838 && !parent
->power
.ignore_children
839 && parent
->power
.runtime_status
!= RPM_ACTIVE
)
841 else if (dev
->power
.runtime_status
== RPM_SUSPENDED
)
842 atomic_inc(&parent
->power
.child_count
);
844 spin_unlock(&parent
->power
.lock
);
851 dev
->power
.runtime_status
= status
;
852 dev
->power
.runtime_error
= 0;
854 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
857 pm_request_idle(parent
);
861 EXPORT_SYMBOL_GPL(__pm_runtime_set_status
);
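/*
 * Usage sketch (hypothetical probe code): a driver whose hardware comes up
 * already powered can report that through the pm_runtime_set_active() wrapper
 * around this function (see include/linux/pm_runtime.h) before enabling
 * run-time PM:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		foo_hw_init(pdev);
 *		pm_runtime_set_active(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 */
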
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

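/*
 * Usage sketch (illustrative): a driver about to release resources used by
 * its run-time PM callbacks can flush everything already in flight first.
 * The barrier only waits for pending work, so it is typically combined with
 * pm_runtime_disable() to keep new requests from being queued afterwards:
 *
 *	pm_runtime_barrier(dev);
 *	pm_runtime_disable(dev);
 *	foo_release_resources(dev);
 */
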
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

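/*
 * Usage sketch: pm_runtime_init() later in this file starts every device with
 * disable_depth set to 1, so run-time PM stays inert until pm_runtime_enable()
 * is called once.  Disable/enable calls nest, which lets a hypothetical driver
 * bracket a section during which callbacks must not run:
 *
 *	pm_runtime_disable(dev);
 *	foo_reprogram_hardware(dev);
 *	pm_runtime_enable(dev);
 */
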
/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	__pm_runtime_resume(dev, false);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		__pm_runtime_idle(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

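/*
 * A sketch of how the two helpers above are typically driven: they are meant
 * to be under user-space control, and in kernels where the power/control
 * sysfs attribute is wired up to them they correspond to:
 *
 *	echo on > /sys/devices/.../power/control	(pm_runtime_forbid)
 *	echo auto > /sys/devices/.../power/control	(pm_runtime_allow)
 */
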
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	spin_lock_init(&dev->power.lock);

	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}