/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
typedef int (*pm_callback_t)(struct device *);
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}
#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
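/*
 * Illustrative example (not part of the original file): resolving and
 * invoking a runtime PM callback through the macro above reduces to a single
 * offsetof()-based lookup, e.g.:
 *
 *	pm_callback_t cb = RPM_GET_CALLBACK(dev, runtime_suspend);
 *
 *	if (cb)
 *		retval = cb(dev);
 */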
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
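/*
 * Illustrative example (not part of the original file): with
 * power.autosuspend_delay = 2000 ms and power.last_busy = t, the expiration
 * computed above is round_jiffies(t + msecs_to_jiffies(2000)), i.e. the
 * timeout is aligned to a whole second because the delay is >= 1000 ms; a
 * 50 ms delay would simply yield t + msecs_to_jiffies(50).
 */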
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}
/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network device
 * drivers, to solve the deadlock problem during runtime resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  The
 * situation was pointed out first by Alan Stern.  Network devices
 * are involved in iSCSI-type situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * not need to enable ancestors any more if the device
		 * has been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * clear flag of the parent device only if all the
		 * children don't set the flag because ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
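/*
 * Illustrative example (not part of the original file; the "foo" names are
 * hypothetical): a block or network device driver would bracket the device's
 * registered lifetime with this call, as required by the comment above:
 *
 *	device_add(&foo->dev);
 *	pm_runtime_set_memalloc_noio(&foo->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 */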
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}
/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}
/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device may be part of an iSCSI block device, so the
		 * network device and its ancestors should be marked as
		 * memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}
/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for its parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}
/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
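/*
 * Illustrative example (not part of the original file): a driver that wants
 * its device suspended roughly 100 ms from now, without blocking, can do:
 *
 *	ret = pm_schedule_suspend(dev, 100);
 *
 * A zero @delay is equivalent to an immediate asynchronous suspend request,
 * as handled at the top of the function above.
 */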
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
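/*
 * Note (not part of the original file): drivers normally do not call the
 * three entry points above directly; they use the static inline wrappers
 * from include/linux/pm_runtime.h, which just pass the appropriate rpmflags.
 * For instance, pm_runtime_get_sync(dev) is expected to be
 * __pm_runtime_resume(dev, RPM_GET_PUT) and pm_runtime_put(dev) is expected
 * to be __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); see that header for
 * the full list.
 */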
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field of the device and the device parent's counter of unsuspended children
 * is modified to reflect the new status.  If the new status is RPM_SUSPENDED,
 * an idle notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
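/*
 * Illustrative example (not part of the original file): drivers call this
 * through the pm_runtime_set_active()/pm_runtime_set_suspended() wrappers,
 * typically before enabling runtime PM in probe, e.g. for a device that
 * probes in the powered-on state:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */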
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
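/*
 * Note (not part of the original file): pm_runtime_forbid() and
 * pm_runtime_allow() back the device's "power/control" sysfs attribute;
 * writing "on" forbids runtime suspend and "auto" allows it again, which is
 * why the two functions adjust both the usage count and runtime_auto.
 */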
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
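/*
 * Illustrative example (not part of the original file): a driver whose
 * ->runtime_suspend()/->runtime_resume() callbacks never sleep can declare
 * that in probe and may then use the synchronous helpers from atomic context:
 *
 *	pm_runtime_irq_safe(dev);
 *	...
 *	pm_runtime_get_sync(dev);	(safe even from an interrupt handler)
 */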
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}
/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
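/*
 * Illustrative example (not part of the original file; the 2000 ms value is
 * arbitrary): the usual driver sequence for enabling autosuspend combines
 * this with __pm_runtime_use_autosuspend() below (via its wrapper) and with
 * pm_runtime_put_autosuspend() on the I/O completion path:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */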
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put(dev->parent);
}
/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend() callback to bring it
 * into suspend state.  Keep runtime PM disabled to preserve the state unless
 * we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	if (!callback) {
		ret = -ENOSYS;
		goto err;
	}

	ret = callback(dev);
	if (ret)
		goto err;

	pm_runtime_set_suspended(dev);
	return 0;
err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
/**
 * pm_runtime_force_resume - Force a device into resume state.
 * @dev: Device to resume.
 *
 * Before invoking this function we expect the user to have brought the device
 * into low power state by a call to pm_runtime_force_suspend().  Here we
 * reverse those actions and bring the device back to full power.  We update
 * the runtime PM status and re-enable runtime PM.
 *
 * Typically this function may be invoked from a system resume callback to
 * make sure the device is put into full power state.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	if (!callback) {
		ret = -ENOSYS;
		goto out;
	}

	ret = callback(dev);
	if (ret)
		goto out;

	pm_runtime_set_active(dev);
	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
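/*
 * Illustrative example (not part of the original file; the foo_* callbacks
 * are hypothetical): drivers whose system sleep handling is identical to
 * their runtime PM handling can reuse the two helpers above directly in
 * their dev_pm_ops:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   NULL)
 *	};
 */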