/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

#ifdef CONFIG_PM

static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);

	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}
/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;
	}

	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}
/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}
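/*
 * Illustrative sketch (not part of the original file): platform code that
 * needs a domain up before touching its devices, for instance during early
 * resume, can simply call pm_genpd_poweron() on a domain it owns.  The
 * names below ("foo_pd", "foo_early_resume") are hypothetical:
 *
 *	static void foo_early_resume(void)
 *	{
 *		int ret = pm_genpd_poweron(&foo_pd);
 *
 *		if (ret)
 *			pr_err("foo: failed to power on PM domain: %d\n", ret);
 *	}
 *
 * The call takes genpd->lock and may sleep, so it must not be made from
 * atomic context.
 */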
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct device *dev = pdd->dev;
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (pdd->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_suspend) {
		if (genpd->start_device)
			genpd->start_device(dev);

		ret = drv->pm->runtime_suspend(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	mutex_lock(&genpd->lock);

	if (!ret)
		pdd->need_restore = true;

	return ret;
}
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct device *dev = pdd->dev;
	struct device_driver *drv = dev->driver;

	if (!pdd->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_resume) {
		if (genpd->start_device)
			genpd->start_device(dev);

		drv->pm->runtime_resume(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	mutex_lock(&genpd->lock);

	pdd->need_restore = false;
}
/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->power_off) {
		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}
/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	if (genpd->stop_device) {
		int ret = genpd->stop_device(dev);
		if (ret)
			return ret;
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		goto out;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(&dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

 out:
	if (genpd->start_device)
		genpd->start_device(dev);

	return 0;
}
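/*
 * Illustrative sketch (not part of the original file): drivers in a generic
 * PM domain do not call the two callbacks above directly.  They use the
 * regular runtime PM helpers and the PM core routes the calls through the
 * domain.  "foo_pdev" below is a hypothetical platform device:
 *
 *	ret = pm_runtime_get_sync(&foo_pdev->dev);	// powers the domain on
 *							// and restarts the device
 *	... access the foo hardware ...
 *	pm_runtime_put_sync(&foo_pdev->dev);		// stops the device; the
 *							// domain is powered off
 *							// once all of its devices
 *							// are runtime suspended
 */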
/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
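/*
 * Illustrative sketch (not part of the original file): platform code usually
 * invokes pm_genpd_poweroff_unused() once, late in boot, after drivers have
 * had a chance to claim their devices, e.g. (hypothetical "foo" name):
 *
 *	static int __init foo_pm_late_init(void)
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(foo_pm_late_init);
 */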
#else

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}
/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
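/*
 * Added summary (not part of the original file) of the kernel-doc above,
 * with "may wakeup" = device_may_wakeup(dev) and "active wakeup" = the
 * domain's ->active_wakeup(dev) result:
 *
 *	can wakeup	may wakeup	active wakeup	resume before suspend?
 *	    no		     -		      -			no
 *	    yes		    yes		     yes		yes
 *	    yes		    yes		     no			no
 *	    yes		    no		     yes		no
 *	    yes		    no		     no			yes
 */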
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by .stop_device()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}
/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_may_wakeup(dev)
	    && genpd->active_wakeup && genpd->active_wakeup(dev))
		return 0;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}
/**
 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_resume_noirq(dev);
}

/**
 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}
/**
 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	return 0;
}
/**
 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_thaw_noirq(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}
/**
 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Power off a device under the assumption that its pm_domain field points to
 * the domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
}

/**
 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late powering off of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_poweroff_noirq(dev);
	if (ret)
		return ret;

	if (device_may_wakeup(dev)
	    && genpd->active_wakeup && genpd->active_wakeup(dev))
		return 0;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}
/**
 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early restore of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->status = GPD_STATE_POWER_OFF;
	if (genpd->suspend_power_off) {
		/*
		 * The boot kernel might put the domain into the power on state,
		 * so make sure it really is powered off.
		 */
		if (genpd->power_off)
			genpd->power_off(genpd);
		return 0;
	}

	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_restore_noirq(dev);
}

/**
 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Restore a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_restore(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
}
/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}
#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_dev_poweroff_noirq	NULL
#define pm_genpd_dev_poweroff		NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_restore		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->status == GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	genpd->device_count++;

	dev->pm_domain = &genpd->domain;
	dev_pm_get_subsys_data(dev);
	pdd = &dev->power.subsys_data->domain_data;
	pdd->dev = dev;
	pdd->need_restore = false;
	list_add_tail(&pdd->list_node, &genpd->dev_list);

 out:
	genpd_release_lock(genpd);

	return ret;
}
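/*
 * Illustrative sketch (not part of the original file): platform setup code
 * typically registers its devices with a domain right after they have been
 * created, e.g. (hypothetical "foo" names):
 *
 *	platform_device_register(&foo_device);
 *	ret = pm_genpd_add_device(&foo_pd, &foo_device.dev);
 *	if (ret)
 *		pr_warn("foo: could not add device to PM domain: %d\n", ret);
 *
 * Note that the call fails with -EINVAL if the domain is currently powered
 * off and with -EAGAIN while a system suspend is being prepared.
 */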
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct pm_domain_data *pdd;
	int ret = -EINVAL;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		if (pdd->dev != dev)
			continue;

		list_del_init(&pdd->list_node);
		pdd->dev = NULL;
		dev_pm_put_subsys_data(dev);
		dev->pm_domain = NULL;

		genpd->device_count--;

		ret = 0;
		break;
	}

 out:
	genpd_release_lock(genpd);

	return ret;
}
/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Reject the request if this link has been created already. */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}
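/*
 * Illustrative sketch (not part of the original file): a SoC with an "A3"
 * power area contained in a larger "A4" area would express that nesting as
 * (hypothetical names):
 *
 *	pm_genpd_init(&a4_pd, NULL, true);
 *	pm_genpd_init(&a3_pd, NULL, true);
 *	ret = pm_genpd_add_subdomain(&a4_pd, &a3_pd);
 *
 * After this, powering on a3_pd via pm_genpd_poweron() powers on a4_pd
 * first, and a4_pd is only powered off once a3_pd (along with any other
 * subdomains and devices of a4_pd) has been powered off.
 */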
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain (true if it starts powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->suspended_count = 0;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore = pm_genpd_restore;
	genpd->domain.ops.complete = pm_genpd_complete;
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
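/*
 * Illustrative sketch (not part of the original file): a minimal platform
 * domain built on this API, with hypothetical "foo" register accessors:
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *genpd)
 *	{
 *		foo_write(FOO_PWR_OFF_REG, FOO_AREA_BIT);	// cut power to the area
 *		return 0;
 *	}
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *genpd)
 *	{
 *		foo_write(FOO_PWR_ON_REG, FOO_AREA_BIT);	// restore power to the area
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.power_off = foo_pd_power_off,
 *		.power_on = foo_pd_power_on,
 *	};
 *
 *	// in the machine's init code:
 *	pm_genpd_init(&foo_pd, NULL, false);	// domain starts powered on
 *
 * Devices are then attached with pm_genpd_add_device(), and the domain is
 * powered off automatically once all of them are runtime suspended.
 */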