/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
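
/*
 * Illustrative sketch (not part of the original file): the expiration time
 * computed above only matters if a driver feeds power.last_busy and drops its
 * usage count with the autosuspend variant of the put helpers.  A typical
 * pattern, with a hypothetical foo_io_complete() handler, would be:
 *
 *	static void foo_io_complete(struct device *dev)
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *	}
 *
 * pm_runtime_mark_last_busy() updates power.last_busy, and the asynchronous
 * autosuspend request then honors the delay computed by
 * pm_runtime_autosuspend_expiration().
 */
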
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pwr_domain)
		callback = dev->pwr_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (callback) {
		spin_unlock_irq(&dev->power.lock);

		callback(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
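
/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->runtime_idle() callback of the kind rpm_idle() ends up invoking.  Many
 * drivers simply request a suspend once they are told the device is idle.
 * The foo_ prefix is hypothetical.
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		pm_runtime_suspend(dev);
 *		return 0;
 *	}
 */
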
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.irq_safe) {
		retval = cb(dev);
	} else {
		spin_unlock_irq(&dev->power.lock);

		retval = cb(dev);

		spin_lock_irq(&dev->power.lock);
	}
	dev->power.runtime_error = retval;
	return retval;
}

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pwr_domain)
		callback = dev->pwr_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = 0;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
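
/*
 * Illustrative sketch (not part of the original file): a ->runtime_suspend()
 * callback as seen from rpm_suspend() above.  Returning -EBUSY (or -EAGAIN)
 * means "not now" and, per the error handling above, does not latch
 * power.runtime_error.  foo_device_is_busy() and foo_save_state() are
 * hypothetical driver helpers.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		if (foo_device_is_busy(dev))
 *			return -EBUSY;
 *		foo_save_state(dev);
 *		return 0;
 *	}
 */
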
/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pwr_domain)
		callback = dev->pwr_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
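
/*
 * Illustrative sketch (not part of the original file): the matching
 * ->runtime_resume() callback invoked by rpm_resume().  A nonzero return
 * value ends up in power.runtime_error and the status is set back to
 * RPM_SUSPENDED, as done above.  foo_restore_state() is hypothetical.
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		return foo_restore_state(dev);
 *	}
 */
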
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
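
/*
 * Illustrative sketch (not part of the original file): a driver that expects
 * the device to stay idle for a while may drop its usage count without an
 * idle notification and schedule a delayed suspend request instead.  The
 * 500 ms delay is an arbitrary example value; foo_transfer_done() is
 * hypothetical.
 *
 *	static void foo_transfer_done(struct device *dev)
 *	{
 *		pm_runtime_put_noidle(dev);
 *		pm_schedule_suspend(dev, 500);
 *	}
 */
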
/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
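
/*
 * Illustrative sketch (not part of the original file): the usual way drivers
 * reach __pm_runtime_resume() is through the pm_runtime_get_sync() /
 * pm_runtime_put() pair around an I/O path.  foo_do_io() is hypothetical.
 *
 *	static int foo_start_io(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_get_sync(dev);
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		ret = foo_do_io(dev);
 *		pm_runtime_put(dev);
 *		return ret;
 *	}
 */
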
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
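
/*
 * Illustrative sketch (not part of the original file): __pm_runtime_set_status()
 * is typically reached through pm_runtime_set_active() or
 * pm_runtime_set_suspended() during probe, before runtime PM is enabled, so
 * that the recorded status matches the actual hardware state.  foo_probe() is
 * a hypothetical driver probe routine for a device left powered by firmware.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */
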
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
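
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * ->runtime_suspend()/->runtime_resume() callbacks can run with interrupts
 * off may declare itself irq-safe at probe time, which then lets it issue
 * asynchronous pm_runtime_get()/pm_runtime_put() calls from atomic context.
 * bar_probe() is hypothetical.
 *
 *	static int bar_probe(struct device *dev)
 *	{
 *		pm_runtime_irq_safe(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */
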
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
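
/*
 * Illustrative sketch (not part of the original file): drivers normally
 * configure autosuspend once at probe time, using
 * pm_runtime_set_autosuspend_delay() together with the
 * pm_runtime_use_autosuspend() wrapper around the helper above.  The 2000 ms
 * delay is an arbitrary example value; baz_probe() is hypothetical.
 *
 *	static int baz_probe(struct device *dev)
 *	{
 *		pm_runtime_set_autosuspend_delay(dev, 2000);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */
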
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}