/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

#define RPM_GET_CALLBACK(dev, cb)				\
({								\
	int (*__rpm_cb)(struct device *__d);			\
								\
	if (dev->pm_domain)					\
		__rpm_cb = dev->pm_domain->ops.cb;		\
	else if (dev->type && dev->type->pm)			\
		__rpm_cb = dev->type->pm->cb;			\
	else if (dev->class && dev->class->pm)			\
		__rpm_cb = dev->class->pm->cb;			\
	else if (dev->bus && dev->bus->pm)			\
		__rpm_cb = dev->bus->pm->cb;			\
	else							\
		__rpm_cb = NULL;				\
								\
	if (!__rpm_cb && dev->driver && dev->driver->pm)	\
		__rpm_cb = dev->driver->pm->cb;			\
								\
	__rpm_cb;						\
})

static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
{
	return RPM_GET_CALLBACK(dev, runtime_suspend);
}

static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
{
	return RPM_GET_CALLBACK(dev, runtime_resume);
}

#ifdef CONFIG_PM_RUNTIME
static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
{
	return RPM_GET_CALLBACK(dev, runtime_idle);
}

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

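/*
 * Illustrative sketch (not part of the original file): the expiration time
 * computed above is driven by power.last_busy, which a driver refreshes via
 * pm_runtime_mark_last_busy() before dropping its usage count with
 * pm_runtime_put_autosuspend().  foo_io_done() is a hypothetical helper.
 */
static void __maybe_unused foo_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend delay */
	pm_runtime_put_autosuspend(dev);	/* may schedule an autosuspend */
}
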
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by a block device or network
 * device driver, for solving the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or the
 *     block device itself), a deadlock may be triggered inside the
 *     memory allocation since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  This
 *     situation was first pointed out by Alan Stern.  Network devices
 *     can be involved in the same way in iSCSI setups.
 *
 * dev_hotplug_mutex is held in the function for handling the
 * hotplug race, because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * no need to enable ancestors any more if the device
		 * has been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * clear flag of the parent device only if all the
		 * children don't set the flag, because an ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

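/*
 * Illustrative sketch (not part of the original file): a block or network
 * device driver would set the flag after device_add() and clear it before
 * device_del(), per the kernel-doc above.  foo_netblk_register() and
 * foo_netblk_unregister() are hypothetical helpers.
 */
static void __maybe_unused foo_netblk_register(struct device *dev)
{
	/* dev has already been registered via device_add() here. */
	pm_runtime_set_memalloc_noio(dev, true);
}

static void __maybe_unused foo_netblk_unregister(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);
	/* device_del() follows. */
}
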
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = rpm_get_idle_cb(dev);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device can be regarded as part of an iSCSI block
		 * device, so the network device and its ancestors
		 * should be marked memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry it out; otherwise send an idle
 * notification for the device's parent (unless ignore_children of
 * parent->power or irq_safe of dev->power is set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = rpm_get_suspend_cb(dev);

	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = rpm_get_resume_cb(dev);

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

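/*
 * Illustrative sketch (not part of the original file): ask for a suspend
 * request to be submitted 100 ms from now, e.g. once a transfer completes.
 * Safe in atomic context because the submission itself is asynchronous.
 * foo_transfer_done() is a hypothetical helper.
 */
static void __maybe_unused foo_transfer_done(struct device *dev)
{
	int err = pm_schedule_suspend(dev, 100);	/* delay in ms */

	if (err < 0)
		dev_dbg(dev, "suspend not scheduled: %d\n", err);
}
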
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

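/*
 * Illustrative sketch (not part of the original file): the pm_runtime_idle()
 * and pm_request_idle() wrappers in <linux/pm_runtime.h> funnel into
 * __pm_runtime_idle() with rpmflags of 0 and RPM_ASYNC respectively, so an
 * interrupt handler would use the asynchronous form.  foo_irq_idle() is a
 * hypothetical helper.
 */
static void __maybe_unused foo_irq_idle(struct device *dev)
{
	pm_request_idle(dev);	/* RPM_ASYNC: never sleeps */
}
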
/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

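/*
 * Illustrative sketch (not part of the original file): the classic driver
 * I/O pattern built on the get/put wrappers from <linux/pm_runtime.h>,
 * which funnel into the entry points above.  foo_do_io() is a hypothetical
 * helper.
 */
static int __maybe_unused foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume now, bump usage count */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* keep the counter balanced */
		return ret;
	}

	/* ... the device is guaranteed active here ... */

	pm_runtime_put(dev);	/* drop the count, request an idle check */
	return 0;
}
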
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

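/*
 * Illustrative sketch (not part of the original file): a probe routine for
 * hardware that powers up active would report that state before enabling
 * runtime PM, via the pm_runtime_set_active() wrapper around the function
 * above.  foo_probe_pm_setup() is a hypothetical helper.
 */
static int __maybe_unused foo_probe_pm_setup(struct device *dev)
{
	int ret = pm_runtime_set_active(dev);	/* status follows the hardware */

	if (ret)
		return ret;	/* e.g. -EBUSY if the parent is suspended */

	pm_runtime_enable(dev);
	return 0;
}
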
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

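/*
 * Illustrative sketch (not part of the original file): the driver core calls
 * pm_runtime_barrier() before unbinding a driver; a driver doing its own
 * teardown could do the same to flush asynchronous requests first.
 * foo_teardown() is a hypothetical helper.
 */
static void __maybe_unused foo_teardown(struct device *dev)
{
	if (pm_runtime_barrier(dev))
		dev_dbg(dev, "pending resume request was carried out\n");
}
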
/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

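/*
 * Illustrative sketch (not part of the original file): a driver whose
 * callbacks only poke registers can declare itself irq-safe at probe time,
 * after which the synchronous helpers may be called from hard irq context.
 * foo_probe_irq_safe() is a hypothetical helper.
 */
static void __maybe_unused foo_probe_irq_safe(struct device *dev)
{
	pm_runtime_irq_safe(dev);	/* parent now stays resumed for good */
}
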
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

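/*
 * Illustrative sketch (not part of the original file): typical autosuspend
 * setup at probe time, pairing the setter above with the use_autosuspend
 * flag.  foo_setup_autosuspend() is a hypothetical helper; the 2000 ms
 * delay is an arbitrary example value.
 */
static void __maybe_unused foo_setup_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s of inactivity */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}
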
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put(dev->parent);
}
#endif /* CONFIG_PM_RUNTIME */

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and if it is active, invoke its ->runtime_suspend callback to bring it into
 * suspend state.  Keep runtime PM disabled to preserve the state unless we
 * encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);

	/*
	 * Note that pm_runtime_status_suspended() returns false while
	 * !CONFIG_PM_RUNTIME, which means the device will be put into low
	 * power state.
	 */
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = rpm_get_suspend_cb(dev);

	if (!callback) {
		ret = -ENOSYS;
		goto err;
	}

	ret = callback(dev);
	if (ret)
		goto err;

	pm_runtime_set_suspended(dev);
	return 0;
 err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device back to full power: we update
 * the runtime PM status and re-enable runtime PM.
 *
 * Typically this function may be invoked from a system resume callback to
 * make sure the device is put into full power state.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	callback = rpm_get_resume_cb(dev);

	if (!callback) {
		ret = -ENOSYS;
		goto out;
	}

	ret = callback(dev);
	if (ret)
		goto out;

	pm_runtime_set_active(dev);
	pm_runtime_mark_last_busy(dev);
 out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
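
/*
 * Illustrative sketch (not part of the original file): the intended use of
 * the two force helpers is as a driver's system sleep callbacks, so the
 * device is suspended and resumed through its runtime PM callbacks across
 * system transitions.  foo_pm_ops is a hypothetical instance.
 */
static const struct dev_pm_ops foo_pm_ops __maybe_unused = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};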