/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

#ifdef CONFIG_PM
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}

static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        if (!WARN_ON(genpd->sd_count == 0))
                genpd->sd_count--;
}
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
        DEFINE_WAIT(wait);

        mutex_lock(&genpd->lock);

        /*
         * Wait for the domain to transition into either the active,
         * or the power off state.
         */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status == GPD_STATE_ACTIVE
                    || genpd->status == GPD_STATE_POWER_OFF)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
        mutex_unlock(&genpd->lock);
}
static void genpd_set_active(struct generic_pm_domain *genpd)
{
        if (genpd->resume_count == 0)
                genpd->status = GPD_STATE_ACTIVE;
}
/**
 * pm_genpd_poweron - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
        struct generic_pm_domain *parent = genpd->parent;
        int ret = 0;

 start:
        if (parent) {
                genpd_acquire_lock(parent);
                mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
        } else {
                mutex_lock(&genpd->lock);
        }

        if (genpd->status == GPD_STATE_ACTIVE
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                goto out;

        if (genpd->status != GPD_STATE_POWER_OFF) {
                genpd_set_active(genpd);
                goto out;
        }

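        /*
         * The domain is off, so its parent has to be powered up first.  Locks
         * are always taken parent first, which is why both locks are dropped
         * before recursing into pm_genpd_poweron(parent) and the whole check
         * sequence is restarted once the parent is active.
         */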
        if (parent && parent->status != GPD_STATE_ACTIVE) {
                mutex_unlock(&genpd->lock);
                genpd_release_lock(parent);

                ret = pm_genpd_poweron(parent);
                if (ret)
                        return ret;

                goto start;
        }

        if (genpd->power_on) {
                ret = genpd->power_on(genpd);
                if (ret)
                        goto out;
        }

        genpd_set_active(genpd);
        if (parent)
                parent->sd_count++;

 out:
        mutex_unlock(&genpd->lock);
        if (parent)
                genpd_release_lock(parent);

        return ret;
}

#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @dle: Device list entry of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct dev_list_entry *dle,
                                  struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct device *dev = dle->dev;
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (dle->need_restore)
                return 0;

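        /*
         * Drop the domain lock while the driver's runtime_suspend callback
         * runs; the caller re-checks the domain status after this function
         * takes the lock back.
         */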
        mutex_unlock(&genpd->lock);

        if (drv && drv->pm && drv->pm->runtime_suspend) {
                if (genpd->start_device)
                        genpd->start_device(dev);

                ret = drv->pm->runtime_suspend(dev);

                if (genpd->stop_device)
                        genpd->stop_device(dev);
        }

        mutex_lock(&genpd->lock);

        if (!ret)
                dle->need_restore = true;

        return ret;
}
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @dle: Device list entry of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
                                      struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct device *dev = dle->dev;
        struct device_driver *drv = dev->driver;

        if (!dle->need_restore)
                return;

        mutex_unlock(&genpd->lock);

        if (drv && drv->pm && drv->pm->runtime_resume) {
                if (genpd->start_device)
                        genpd->start_device(dev);

                drv->pm->runtime_resume(dev);

                if (genpd->stop_device)
                        genpd->stop_device(dev);
        }

        mutex_lock(&genpd->lock);

        dle->need_restore = false;
}
/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
        return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
        if (!work_pending(&genpd->power_off_work))
                queue_work(pm_wq, &genpd->power_off_work);
}
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain *parent;
        struct dev_list_entry *dle;
        unsigned int not_suspended;
        int ret = 0;

 start:
        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) System suspend is in progress.
         * (3) One of the domain's devices is being resumed right now.
         */
        if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
            || genpd->resume_count > 0)
                return 0;

        if (genpd->sd_count > 0)
                return -EBUSY;

        not_suspended = 0;
        list_for_each_entry(dle, &genpd->dev_list, node)
                if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
                        not_suspended++;

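        /*
         * in_progress is the number of callers currently executing
         * pm_genpd_runtime_suspend() for this domain; their devices have not
         * reached the "suspended" state yet, so they are allowed for here.
         */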
        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->poweroff_task) {
                /*
                 * Another instance of pm_genpd_poweroff() is executing
                 * callbacks, so tell it to start over and return.
                 */
                genpd->status = GPD_STATE_REPEAT;
                return 0;
        }

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        genpd->status = GPD_STATE_BUSY;
        genpd->poweroff_task = current;

        list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
                ret = __pm_genpd_save_device(dle, genpd);
                if (ret) {
                        genpd_set_active(genpd);
                        goto out;
                }

                if (genpd_abort_poweroff(genpd))
                        goto out;

                if (genpd->status == GPD_STATE_REPEAT) {
                        genpd->poweroff_task = NULL;
                        goto start;
                }
        }

        parent = genpd->parent;
        if (parent) {
                mutex_unlock(&genpd->lock);

                genpd_acquire_lock(parent);
                mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

                if (genpd_abort_poweroff(genpd)) {
                        genpd_release_lock(parent);
                        goto out;
                }
        }

        if (genpd->power_off) {
                ret = genpd->power_off(genpd);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        if (parent)
                                genpd_release_lock(parent);

                        goto out;
                }
        }

        genpd->status = GPD_STATE_POWER_OFF;

        if (parent) {
                genpd_sd_counter_dec(parent);
                if (parent->sd_count == 0)
                        genpd_queue_power_off_work(parent);

                genpd_release_lock(parent);
        }

 out:
        genpd->poweroff_task = NULL;
        wake_up_all(&genpd->status_wait_queue);
        return ret;
}
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        genpd_acquire_lock(genpd);
        pm_genpd_poweroff(genpd);
        genpd_release_lock(genpd);
}
/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->stop_device) {
                int ret = genpd->stop_device(dev);
                if (ret)
                        return ret;
        }

        mutex_lock(&genpd->lock);
        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
        mutex_unlock(&genpd->lock);

        return 0;
}
/**
 * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_runtime_resume(struct device *dev,
                                      struct generic_pm_domain *genpd)
{
        struct dev_list_entry *dle;

        list_for_each_entry(dle, &genpd->dev_list, node) {
                if (dle->dev == dev) {
                        __pm_genpd_restore_device(dle, genpd);
                        break;
                }
        }
}
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        DEFINE_WAIT(wait);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        ret = pm_genpd_poweron(genpd);
        if (ret)
                return ret;

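        /*
         * Mark the domain busy and bump resume_count so that a concurrent
         * pm_genpd_poweroff() aborts (see genpd_abort_poweroff()) instead of
         * racing with this resume.
         */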
        mutex_lock(&genpd->lock);
        genpd->status = GPD_STATE_BUSY;
        genpd->resume_count++;
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * If current is the powering off task, we have been called
                 * reentrantly from one of the device callbacks, so we should
                 * not wait.
                 */
                if (!genpd->poweroff_task || genpd->poweroff_task == current)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
        __pm_genpd_runtime_resume(dev, genpd);
        genpd->resume_count--;
        genpd_set_active(genpd);
        wake_up_all(&genpd->status_wait_queue);
        mutex_unlock(&genpd->lock);

        if (genpd->start_device)
                genpd->start_device(dev);

        return 0;
}
/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
        struct generic_pm_domain *genpd;

        mutex_lock(&gpd_list_lock);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
                genpd_queue_power_off_work(genpd);

        mutex_unlock(&gpd_list_lock);
}

#else

static inline void genpd_power_off_work_fn(struct work_struct *work) {}
static inline void __pm_genpd_runtime_resume(struct device *dev,
                                             struct generic_pm_domain *genpd) {}

#define pm_genpd_runtime_suspend NULL
#define pm_genpd_runtime_resume NULL

#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its parent.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct generic_pm_domain *parent = genpd->parent;

        if (genpd->status == GPD_STATE_POWER_OFF)
                return;

        if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
                return;

        if (genpd->power_off)
                genpd->power_off(genpd);

        genpd->status = GPD_STATE_POWER_OFF;
        if (parent) {
                genpd_sd_counter_dec(parent);
                pm_genpd_sync_poweroff(parent);
        }
}
/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
        bool active_wakeup;

        if (!device_can_wakeup(dev))
                return false;

        active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
        return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

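        /*
         * Only the first device prepared in the domain samples the domain
         * state: if the domain is off at that point, suspend_power_off makes
         * all of its devices skip the remaining suspend callbacks.
         */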
        if (genpd->prepared_count++ == 0)
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;

        genpd_release_lock(genpd);

        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by .stop_device()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        pm_runtime_put_sync(dev);
        return ret;
}
/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}
/**
 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        ret = pm_generic_suspend_noirq(dev);
        if (ret)
                return ret;

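        /*
         * Leave the device operational if it is allowed to wake up the system
         * and the domain requires it to stay powered for that purpose.
         */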
        if (device_may_wakeup(dev)
            && genpd->active_wakeup && genpd->active_wakeup(dev))
                return 0;

        if (genpd->stop_device)
                genpd->stop_device(dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}
/**
 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_poweron(genpd);
        genpd->suspended_count--;
        if (genpd->start_device)
                genpd->start_device(dev);

        return pm_generic_resume_noirq(dev);
}
/**
 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}
/**
 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}
/**
 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        ret = pm_generic_freeze_noirq(dev);
        if (ret)
                return ret;

        if (genpd->stop_device)
                genpd->stop_device(dev);

        return 0;
}
/**
 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        if (genpd->start_device)
                genpd->start_device(dev);

        return pm_generic_thaw_noirq(dev);
}
/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}
/**
 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Power off a device under the assumption that its pm_domain field points to
 * the domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
}
/**
 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late powering off of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        ret = pm_generic_poweroff_noirq(dev);
        if (ret)
                return ret;

        if (device_may_wakeup(dev)
            && genpd->active_wakeup && genpd->active_wakeup(dev))
                return 0;

        if (genpd->stop_device)
                genpd->stop_device(dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}
/**
 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early restore of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
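        /*
         * The status recorded in the hibernation image may not match the
         * hardware state left behind by the boot kernel, so assume "power off"
         * here and power the domain back up below if that is needed.
         */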
        genpd->status = GPD_STATE_POWER_OFF;
        if (genpd->suspend_power_off) {
                /*
                 * The boot kernel might put the domain into the power on state,
                 * so make sure it really is powered off.
                 */
                if (genpd->power_off)
                        genpd->power_off(genpd);
                return 0;
        }

        pm_genpd_poweron(genpd);
        genpd->suspended_count--;
        if (genpd->start_device)
                genpd->start_device(dev);

        return pm_generic_restore_noirq(dev);
}
/**
 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Restore a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_restore(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
}
/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
                pm_runtime_idle(dev);
        }
}
#else

#define pm_genpd_prepare NULL
#define pm_genpd_suspend NULL
#define pm_genpd_suspend_noirq NULL
#define pm_genpd_resume_noirq NULL
#define pm_genpd_resume NULL
#define pm_genpd_freeze NULL
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
#define pm_genpd_dev_poweroff_noirq NULL
#define pm_genpd_dev_poweroff NULL
#define pm_genpd_restore_noirq NULL
#define pm_genpd_restore NULL
#define pm_genpd_complete NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
        struct dev_list_entry *dle;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        genpd_acquire_lock(genpd);

        if (genpd->status == GPD_STATE_POWER_OFF) {
                ret = -EINVAL;
                goto out;
        }

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(dle, &genpd->dev_list, node)
                if (dle->dev == dev) {
                        ret = -EINVAL;
                        goto out;
                }

        dle = kzalloc(sizeof(*dle), GFP_KERNEL);
        if (!dle) {
                ret = -ENOMEM;
                goto out;
        }

        dle->dev = dev;
        dle->need_restore = false;
        list_add_tail(&dle->node, &genpd->dev_list);
        genpd->device_count++;

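        /*
         * Point the device at the domain's PM callbacks; dev->power.lock is
         * held so that readers of dev->pm_domain see a consistent value.
         */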
        spin_lock_irq(&dev->power.lock);
        dev->pm_domain = &genpd->domain;
        spin_unlock_irq(&dev->power.lock);

 out:
        genpd_release_lock(genpd);

        return ret;
}
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                           struct device *dev)
{
        struct dev_list_entry *dle;
        int ret = -EINVAL;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(dle, &genpd->dev_list, node) {
                if (dle->dev != dev)
                        continue;

                spin_lock_irq(&dev->power.lock);
                dev->pm_domain = NULL;
                spin_unlock_irq(&dev->power.lock);

                genpd->device_count--;
                list_del(&dle->node);
                kfree(dle);

                ret = 0;
                break;
        }

 out:
        genpd_release_lock(genpd);

        return ret;
}
/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @new_subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                           struct generic_pm_domain *new_subdomain)
{
        struct generic_pm_domain *subdomain;
        int ret = 0;

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
                return -EINVAL;

 start:
        genpd_acquire_lock(genpd);
        mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);

        if (new_subdomain->status != GPD_STATE_POWER_OFF
            && new_subdomain->status != GPD_STATE_ACTIVE) {
                mutex_unlock(&new_subdomain->lock);
                genpd_release_lock(genpd);
                goto start;
        }

        if (genpd->status == GPD_STATE_POWER_OFF
            && new_subdomain->status != GPD_STATE_POWER_OFF) {
                ret = -EINVAL;
                goto out;
        }

        list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
                if (subdomain == new_subdomain) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
        new_subdomain->parent = genpd;
        if (new_subdomain->status != GPD_STATE_POWER_OFF)
                genpd->sd_count++;

 out:
        mutex_unlock(&new_subdomain->lock);
        genpd_release_lock(genpd);

        return ret;
}
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @target: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                              struct generic_pm_domain *target)
{
        struct generic_pm_domain *subdomain;
        int ret = -EINVAL;

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
                return -EINVAL;

 start:
        genpd_acquire_lock(genpd);

        list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
                if (subdomain != target)
                        continue;

                mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

                if (subdomain->status != GPD_STATE_POWER_OFF
                    && subdomain->status != GPD_STATE_ACTIVE) {
                        mutex_unlock(&subdomain->lock);
                        genpd_release_lock(genpd);
                        goto start;
                }

                list_del(&subdomain->sd_node);
                subdomain->parent = NULL;
                if (subdomain->status != GPD_STATE_POWER_OFF)
                        genpd_sd_counter_dec(genpd);

                mutex_unlock(&subdomain->lock);

                ret = 0;
                break;
        }

        genpd_release_lock(genpd);

        return ret;
}
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the PM domain (true if it is powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
                   struct dev_power_governor *gov, bool is_off)
{
        if (IS_ERR_OR_NULL(genpd))
                return;

        INIT_LIST_HEAD(&genpd->sd_node);
        genpd->parent = NULL;
        INIT_LIST_HEAD(&genpd->dev_list);
        INIT_LIST_HEAD(&genpd->sd_list);
        mutex_init(&genpd->lock);
        genpd->gov = gov;
        INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
        genpd->in_progress = 0;
        genpd->sd_count = 0;
        genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
        init_waitqueue_head(&genpd->status_wait_queue);
        genpd->poweroff_task = NULL;
        genpd->resume_count = 0;
        genpd->device_count = 0;
        genpd->suspended_count = 0;
        genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
        genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
        genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
        genpd->domain.ops.prepare = pm_genpd_prepare;
        genpd->domain.ops.suspend = pm_genpd_suspend;
        genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
        genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
        genpd->domain.ops.resume = pm_genpd_resume;
        genpd->domain.ops.freeze = pm_genpd_freeze;
        genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
        genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
        genpd->domain.ops.thaw = pm_genpd_thaw;
        genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
        genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
        genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
        genpd->domain.ops.restore = pm_genpd_restore;
        genpd->domain.ops.complete = pm_genpd_complete;
        mutex_lock(&gpd_list_lock);
        list_add(&genpd->gpd_list_node, &gpd_list);
        mutex_unlock(&gpd_list_lock);
}