1 /*
2 * drivers/base/power/domain.c - Common code related to device power domains.
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6 * This file is released under the GPLv2.
7 */
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_qos.h>
16 #include <linux/pm_clock.h>
17 #include <linux/slab.h>
18 #include <linux/err.h>
19 #include <linux/sched.h>
20 #include <linux/suspend.h>
21 #include <linux/export.h>
23 #include "power.h"
25 #define GENPD_RETRY_MAX_MS 250 /* Approximate */
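/*
 * The macro below invokes the @callback member of genpd->dev_ops for @dev
 * when the domain provides one; otherwise the expression evaluates to
 * (type)0, i.e. "no callback" is treated as success.
 */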
27 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
28 ({ \
29 type (*__routine)(struct device *__d); \
30 type __ret = (type)0; \
32 __routine = genpd->dev_ops.callback; \
33 if (__routine) { \
34 __ret = __routine(dev); \
35 } \
36 __ret; \
39 static LIST_HEAD(gpd_list);
40 static DEFINE_MUTEX(gpd_list_lock);
42 struct genpd_lock_ops {
43 void (*lock)(struct generic_pm_domain *genpd);
44 void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
45 int (*lock_interruptible)(struct generic_pm_domain *genpd);
46 void (*unlock)(struct generic_pm_domain *genpd);
49 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
51 mutex_lock(&genpd->mlock);
54 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
55 int depth)
57 mutex_lock_nested(&genpd->mlock, depth);
60 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
62 return mutex_lock_interruptible(&genpd->mlock);
65 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
67 return mutex_unlock(&genpd->mlock);
70 static const struct genpd_lock_ops genpd_mtx_ops = {
71 .lock = genpd_lock_mtx,
72 .lock_nested = genpd_lock_nested_mtx,
73 .lock_interruptible = genpd_lock_interruptible_mtx,
74 .unlock = genpd_unlock_mtx,
77 static void genpd_lock_spin(struct generic_pm_domain *genpd)
78 __acquires(&genpd->slock)
80 unsigned long flags;
82 spin_lock_irqsave(&genpd->slock, flags);
83 genpd->lock_flags = flags;
86 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
87 int depth)
88 __acquires(&genpd->slock)
90 unsigned long flags;
92 spin_lock_irqsave_nested(&genpd->slock, flags, depth);
93 genpd->lock_flags = flags;
96 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
97 __acquires(&genpd->slock)
99 unsigned long flags;
101 spin_lock_irqsave(&genpd->slock, flags);
102 genpd->lock_flags = flags;
103 return 0;
106 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
107 __releases(&genpd->slock)
109 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
112 static const struct genpd_lock_ops genpd_spin_ops = {
113 .lock = genpd_lock_spin,
114 .lock_nested = genpd_lock_nested_spin,
115 .lock_interruptible = genpd_lock_interruptible_spin,
116 .unlock = genpd_unlock_spin,
119 #define genpd_lock(p) p->lock_ops->lock(p)
120 #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
121 #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
122 #define genpd_unlock(p) p->lock_ops->unlock(p)
124 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
126 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
127 struct generic_pm_domain *genpd)
129 bool ret;
131 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
133 /* Warn once for each IRQ safe dev in no sleep domain */
134 if (ret)
135 dev_warn_once(dev, "PM domain %s will not be powered off\n",
136 genpd->name);
138 return ret;
142 * Get the generic PM domain for a particular struct device.
143 * This validates the struct device pointer, the PM domain pointer,
144 * and checks that the PM domain pointer is a real generic PM domain.
145 * Any failure results in NULL being returned.
147 static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
149 struct generic_pm_domain *genpd = NULL, *gpd;
151 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
152 return NULL;
154 mutex_lock(&gpd_list_lock);
155 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
156 if (&gpd->domain == dev->pm_domain) {
157 genpd = gpd;
158 break;
161 mutex_unlock(&gpd_list_lock);
163 return genpd;
167 * This should only be used where we are certain that the pm_domain
168 * attached to the device is a genpd domain.
170 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
172 if (IS_ERR_OR_NULL(dev->pm_domain))
173 return ERR_PTR(-EINVAL);
175 return pd_to_genpd(dev->pm_domain);
178 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
180 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
183 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
185 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
188 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
190 bool ret = false;
192 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
193 ret = !!atomic_dec_and_test(&genpd->sd_count);
195 return ret;
198 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
200 atomic_inc(&genpd->sd_count);
201 smp_mb__after_atomic();
204 static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
206 unsigned int state_idx = genpd->state_idx;
207 ktime_t time_start;
208 s64 elapsed_ns;
209 int ret;
211 if (!genpd->power_on)
212 return 0;
214 if (!timed)
215 return genpd->power_on(genpd);
217 time_start = ktime_get();
218 ret = genpd->power_on(genpd);
219 if (ret)
220 return ret;
222 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
223 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
224 return ret;
226 genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
227 genpd->max_off_time_changed = true;
228 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
229 genpd->name, "on", elapsed_ns);
231 return ret;
234 static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
236 unsigned int state_idx = genpd->state_idx;
237 ktime_t time_start;
238 s64 elapsed_ns;
239 int ret;
241 if (!genpd->power_off)
242 return 0;
244 if (!timed)
245 return genpd->power_off(genpd);
247 time_start = ktime_get();
248 ret = genpd->power_off(genpd);
249 if (ret == -EBUSY)
250 return ret;
252 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
253 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
254 return ret;
256 genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
257 genpd->max_off_time_changed = true;
258 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
259 genpd->name, "off", elapsed_ns);
261 return ret;
265 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
266 * @genpd: PM domain to power off.
268 * Queue up the execution of genpd_poweroff() unless it's already been done
269 * before.
271 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
273 queue_work(pm_wq, &genpd->power_off_work);
277 * genpd_poweron - Restore power to a given PM domain and its masters.
278 * @genpd: PM domain to power up.
279 * @depth: nesting count for lockdep.
281 * Restore power to @genpd and all of its masters so that it is possible to
282 * resume a device belonging to it.
284 static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
286 struct gpd_link *link;
287 int ret = 0;
289 if (genpd->status == GPD_STATE_ACTIVE)
290 return 0;
293 * The list is guaranteed not to change while the loop below is being
294 * executed, unless one of the masters' .power_on() callbacks fiddles
295 * with it.
297 list_for_each_entry(link, &genpd->slave_links, slave_node) {
298 struct generic_pm_domain *master = link->master;
300 genpd_sd_counter_inc(master);
302 genpd_lock_nested(master, depth + 1);
303 ret = genpd_poweron(master, depth + 1);
304 genpd_unlock(master);
306 if (ret) {
307 genpd_sd_counter_dec(master);
308 goto err;
312 ret = genpd_power_on(genpd, true);
313 if (ret)
314 goto err;
316 genpd->status = GPD_STATE_ACTIVE;
317 return 0;
319 err:
320 list_for_each_entry_continue_reverse(link,
321 &genpd->slave_links,
322 slave_node) {
323 genpd_sd_counter_dec(link->master);
324 genpd_queue_power_off_work(link->master);
327 return ret;
330 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
331 unsigned long val, void *ptr)
333 struct generic_pm_domain_data *gpd_data;
334 struct device *dev;
336 gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
337 dev = gpd_data->base.dev;
339 for (;;) {
340 struct generic_pm_domain *genpd;
341 struct pm_domain_data *pdd;
343 spin_lock_irq(&dev->power.lock);
345 pdd = dev->power.subsys_data ?
346 dev->power.subsys_data->domain_data : NULL;
347 if (pdd && pdd->dev) {
348 to_gpd_data(pdd)->td.constraint_changed = true;
349 genpd = dev_to_genpd(dev);
350 } else {
351 genpd = ERR_PTR(-ENODATA);
354 spin_unlock_irq(&dev->power.lock);
356 if (!IS_ERR(genpd)) {
357 genpd_lock(genpd);
358 genpd->max_off_time_changed = true;
359 genpd_unlock(genpd);
362 dev = dev->parent;
363 if (!dev || dev->power.ignore_children)
364 break;
367 return NOTIFY_DONE;
371 * genpd_poweroff - Remove power from a given PM domain.
372 * @genpd: PM domain to power down.
373 * @is_async: PM domain is powered down from a scheduled work
375 * If all of the @genpd's devices have been suspended and all of its subdomains
376 * have been powered down, remove power from @genpd.
378 static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
380 struct pm_domain_data *pdd;
381 struct gpd_link *link;
382 unsigned int not_suspended = 0;
385 * Do not try to power off the domain in the following situations:
386 * (1) The domain is already in the "power off" state.
387 * (2) System suspend is in progress.
389 if (genpd->status == GPD_STATE_POWER_OFF
390 || genpd->prepared_count > 0)
391 return 0;
393 if (atomic_read(&genpd->sd_count) > 0)
394 return -EBUSY;
396 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
397 enum pm_qos_flags_status stat;
399 stat = dev_pm_qos_flags(pdd->dev,
400 PM_QOS_FLAG_NO_POWER_OFF
401 | PM_QOS_FLAG_REMOTE_WAKEUP);
402 if (stat > PM_QOS_FLAGS_NONE)
403 return -EBUSY;
406 * Do not allow PM domain to be powered off, when an IRQ safe
407 * device is part of a non-IRQ safe domain.
409 if (!pm_runtime_suspended(pdd->dev) ||
410 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
411 not_suspended++;
414 if (not_suspended > 1 || (not_suspended == 1 && is_async))
415 return -EBUSY;
417 if (genpd->gov && genpd->gov->power_down_ok) {
418 if (!genpd->gov->power_down_ok(&genpd->domain))
419 return -EAGAIN;
422 if (genpd->power_off) {
423 int ret;
425 if (atomic_read(&genpd->sd_count) > 0)
426 return -EBUSY;
429 * If sd_count > 0 at this point, one of the subdomains hasn't
430 * managed to call genpd_poweron() for the master yet after
431 * incrementing it. In that case genpd_poweron() will wait
432 * for us to drop the lock, so we can call .power_off() and let
433 * the genpd_poweron() restore power for us (this shouldn't
434 * happen very often).
436 ret = genpd_power_off(genpd, true);
437 if (ret)
438 return ret;
441 genpd->status = GPD_STATE_POWER_OFF;
443 list_for_each_entry(link, &genpd->slave_links, slave_node) {
444 genpd_sd_counter_dec(link->master);
445 genpd_queue_power_off_work(link->master);
448 return 0;
452 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
453 * @work: Work structure used for scheduling the execution of this function.
455 static void genpd_power_off_work_fn(struct work_struct *work)
457 struct generic_pm_domain *genpd;
459 genpd = container_of(work, struct generic_pm_domain, power_off_work);
461 genpd_lock(genpd);
462 genpd_poweroff(genpd, true);
463 genpd_unlock(genpd);
467 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
468 * @dev: Device to handle.
470 static int __genpd_runtime_suspend(struct device *dev)
472 int (*cb)(struct device *__dev);
474 if (dev->type && dev->type->pm)
475 cb = dev->type->pm->runtime_suspend;
476 else if (dev->class && dev->class->pm)
477 cb = dev->class->pm->runtime_suspend;
478 else if (dev->bus && dev->bus->pm)
479 cb = dev->bus->pm->runtime_suspend;
480 else
481 cb = NULL;
483 if (!cb && dev->driver && dev->driver->pm)
484 cb = dev->driver->pm->runtime_suspend;
486 return cb ? cb(dev) : 0;
490 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
491 * @dev: Device to handle.
493 static int __genpd_runtime_resume(struct device *dev)
495 int (*cb)(struct device *__dev);
497 if (dev->type && dev->type->pm)
498 cb = dev->type->pm->runtime_resume;
499 else if (dev->class && dev->class->pm)
500 cb = dev->class->pm->runtime_resume;
501 else if (dev->bus && dev->bus->pm)
502 cb = dev->bus->pm->runtime_resume;
503 else
504 cb = NULL;
506 if (!cb && dev->driver && dev->driver->pm)
507 cb = dev->driver->pm->runtime_resume;
509 return cb ? cb(dev) : 0;
513 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
514 * @dev: Device to suspend.
516 * Carry out a runtime suspend of a device under the assumption that its
517 * pm_domain field points to the domain member of an object of type
518 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
520 static int genpd_runtime_suspend(struct device *dev)
522 struct generic_pm_domain *genpd;
523 bool (*suspend_ok)(struct device *__dev);
524 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
525 bool runtime_pm = pm_runtime_enabled(dev);
526 ktime_t time_start;
527 s64 elapsed_ns;
528 int ret;
530 dev_dbg(dev, "%s()\n", __func__);
532 genpd = dev_to_genpd(dev);
533 if (IS_ERR(genpd))
534 return -EINVAL;
537 * A runtime PM centric subsystem/driver may re-use the runtime PM
538 * callbacks for other purposes than runtime PM. In those scenarios
539 * runtime PM is disabled. Under these circumstances, we shall skip
540 * validating/measuring the PM QoS latency.
542 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
543 if (runtime_pm && suspend_ok && !suspend_ok(dev))
544 return -EBUSY;
546 /* Measure suspend latency. */
547 time_start = 0;
548 if (runtime_pm)
549 time_start = ktime_get();
551 ret = __genpd_runtime_suspend(dev);
552 if (ret)
553 return ret;
555 ret = genpd_stop_dev(genpd, dev);
556 if (ret) {
557 __genpd_runtime_resume(dev);
558 return ret;
561 /* Update suspend latency value if the measured time exceeds it. */
562 if (runtime_pm) {
563 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
564 if (elapsed_ns > td->suspend_latency_ns) {
565 td->suspend_latency_ns = elapsed_ns;
566 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
567 elapsed_ns);
568 genpd->max_off_time_changed = true;
569 td->constraint_changed = true;
574 * If power.irq_safe is set, this routine may be run with
575 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
577 if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
578 return 0;
580 genpd_lock(genpd);
581 genpd_poweroff(genpd, false);
582 genpd_unlock(genpd);
584 return 0;
588 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
589 * @dev: Device to resume.
591 * Carry out a runtime resume of a device under the assumption that its
592 * pm_domain field points to the domain member of an object of type
593 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
595 static int genpd_runtime_resume(struct device *dev)
597 struct generic_pm_domain *genpd;
598 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
599 bool runtime_pm = pm_runtime_enabled(dev);
600 ktime_t time_start;
601 s64 elapsed_ns;
602 int ret;
603 bool timed = true;
605 dev_dbg(dev, "%s()\n", __func__);
607 genpd = dev_to_genpd(dev);
608 if (IS_ERR(genpd))
609 return -EINVAL;
612 * As we don't power off a non-IRQ-safe domain that contains an
613 * IRQ-safe device, we don't need to restore power to it here.
615 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
616 timed = false;
617 goto out;
620 genpd_lock(genpd);
621 ret = genpd_poweron(genpd, 0);
622 genpd_unlock(genpd);
624 if (ret)
625 return ret;
627 out:
628 /* Measure resume latency. */
629 if (timed && runtime_pm)
630 time_start = ktime_get();
632 ret = genpd_start_dev(genpd, dev);
633 if (ret)
634 goto err_poweroff;
636 ret = __genpd_runtime_resume(dev);
637 if (ret)
638 goto err_stop;
640 /* Update resume latency value if the measured time exceeds it. */
641 if (timed && runtime_pm) {
642 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
643 if (elapsed_ns > td->resume_latency_ns) {
644 td->resume_latency_ns = elapsed_ns;
645 dev_dbg(dev, "resume latency exceeded, %lld ns\n",
646 elapsed_ns);
647 genpd->max_off_time_changed = true;
648 td->constraint_changed = true;
652 return 0;
654 err_stop:
655 genpd_stop_dev(genpd, dev);
656 err_poweroff:
657 if (!pm_runtime_is_irq_safe(dev) ||
658 (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
659 genpd_lock(genpd);
660 genpd_poweroff(genpd, 0);
661 genpd_unlock(genpd);
664 return ret;
667 static bool pd_ignore_unused;
668 static int __init pd_ignore_unused_setup(char *__unused)
670 pd_ignore_unused = true;
671 return 1;
673 __setup("pd_ignore_unused", pd_ignore_unused_setup);
676 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
678 static int __init genpd_poweroff_unused(void)
680 struct generic_pm_domain *genpd;
682 if (pd_ignore_unused) {
683 pr_warn("genpd: Not disabling unused power domains\n");
684 return 0;
687 mutex_lock(&gpd_list_lock);
689 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
690 genpd_queue_power_off_work(genpd);
692 mutex_unlock(&gpd_list_lock);
694 return 0;
696 late_initcall(genpd_poweroff_unused);
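/*
 * Example: booting with "pd_ignore_unused" on the kernel command line sets
 * pd_ignore_unused above, so the late initcall leaves all otherwise unused
 * PM domains powered on instead of queueing them for power-off.
 */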
698 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
701 * pm_genpd_present - Check if the given PM domain has been initialized.
702 * @genpd: PM domain to check.
704 static bool pm_genpd_present(const struct generic_pm_domain *genpd)
706 const struct generic_pm_domain *gpd;
708 if (IS_ERR_OR_NULL(genpd))
709 return false;
711 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
712 if (gpd == genpd)
713 return true;
715 return false;
718 #endif
720 #ifdef CONFIG_PM_SLEEP
722 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
723 struct device *dev)
725 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
729 * genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
730 * @genpd: PM domain to power off, if possible.
732 * Check if the given PM domain can be powered off (during system suspend or
733 * hibernation) and do that if so. Also, in that case propagate to its masters.
735 * This function is only called in "noirq" and "syscore" stages of system power
736 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
737 * executed sequentially, so it is guaranteed that it will never run twice in
738 * parallel).
740 static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
742 struct gpd_link *link;
744 if (genpd->status == GPD_STATE_POWER_OFF)
745 return;
747 if (genpd->suspended_count != genpd->device_count
748 || atomic_read(&genpd->sd_count) > 0)
749 return;
751 /* Choose the deepest state when suspending */
752 genpd->state_idx = genpd->state_count - 1;
753 genpd_power_off(genpd, false);
755 genpd->status = GPD_STATE_POWER_OFF;
757 list_for_each_entry(link, &genpd->slave_links, slave_node) {
758 genpd_sd_counter_dec(link->master);
759 genpd_sync_poweroff(link->master);
764 * genpd_sync_poweron - Synchronously power on a PM domain and its masters.
765 * @genpd: PM domain to power on.
767 * This function is only called in "noirq" and "syscore" stages of system power
768 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
769 * executed sequentially, so it is guaranteed that it will never run twice in
770 * parallel).
772 static void genpd_sync_poweron(struct generic_pm_domain *genpd)
774 struct gpd_link *link;
776 if (genpd->status == GPD_STATE_ACTIVE)
777 return;
779 list_for_each_entry(link, &genpd->slave_links, slave_node) {
780 genpd_sync_poweron(link->master);
781 genpd_sd_counter_inc(link->master);
784 genpd_power_on(genpd, false);
786 genpd->status = GPD_STATE_ACTIVE;
790 * resume_needed - Check whether to resume a device before system suspend.
791 * @dev: Device to check.
792 * @genpd: PM domain the device belongs to.
794 * There are two cases in which a device that can wake up the system from sleep
795 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
796 * to wake up the system and it has to remain active for this purpose while the
797 * system is in the sleep state and (2) if the device is not enabled to wake up
798 * the system from sleep states and it generally doesn't generate wakeup signals
799 * by itself (those signals are generated on its behalf by other parts of the
800 * system). In the latter case it may be necessary to reconfigure the device's
801 * wakeup settings during system suspend, because it may have been set up to
802 * signal remote wakeup from the system's working state as needed by runtime PM.
803 * Return 'true' in either of the above cases.
805 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
807 bool active_wakeup;
809 if (!device_can_wakeup(dev))
810 return false;
812 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
813 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
817 * pm_genpd_prepare - Start power transition of a device in a PM domain.
818 * @dev: Device to start the transition of.
820 * Start a power transition of a device (during a system-wide power transition)
821 * under the assumption that its pm_domain field points to the domain member of
822 * an object of type struct generic_pm_domain representing a PM domain
823 * consisting of I/O devices.
825 static int pm_genpd_prepare(struct device *dev)
827 struct generic_pm_domain *genpd;
828 int ret;
830 dev_dbg(dev, "%s()\n", __func__);
832 genpd = dev_to_genpd(dev);
833 if (IS_ERR(genpd))
834 return -EINVAL;
837 * If a wakeup request is pending for the device, it should be woken up
838 * at this point and a system wakeup event should be reported if it's
839 * set up to wake up the system from sleep states.
841 if (resume_needed(dev, genpd))
842 pm_runtime_resume(dev);
844 genpd_lock(genpd);
846 if (genpd->prepared_count++ == 0)
847 genpd->suspended_count = 0;
849 genpd_unlock(genpd);
851 ret = pm_generic_prepare(dev);
852 if (ret) {
853 genpd_lock(genpd);
855 genpd->prepared_count--;
857 genpd_unlock(genpd);
860 return ret;
864 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
865 * @dev: Device to suspend.
867 * Stop the device and remove power from the domain if all devices in it have
868 * been stopped.
870 static int pm_genpd_suspend_noirq(struct device *dev)
872 struct generic_pm_domain *genpd;
873 int ret;
875 dev_dbg(dev, "%s()\n", __func__);
877 genpd = dev_to_genpd(dev);
878 if (IS_ERR(genpd))
879 return -EINVAL;
881 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
882 return 0;
884 if (genpd->dev_ops.stop && genpd->dev_ops.start) {
885 ret = pm_runtime_force_suspend(dev);
886 if (ret)
887 return ret;
891 * Since all of the "noirq" callbacks are executed sequentially, it is
892 * guaranteed that this function will never run twice in parallel for
893 * the same PM domain, so it is not necessary to use locking here.
895 genpd->suspended_count++;
896 genpd_sync_poweroff(genpd);
898 return 0;
902 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
903 * @dev: Device to resume.
905 * Restore power to the device's PM domain, if necessary, and start the device.
907 static int pm_genpd_resume_noirq(struct device *dev)
909 struct generic_pm_domain *genpd;
910 int ret = 0;
912 dev_dbg(dev, "%s()\n", __func__);
914 genpd = dev_to_genpd(dev);
915 if (IS_ERR(genpd))
916 return -EINVAL;
918 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
919 return 0;
922 * Since all of the "noirq" callbacks are executed sequentially, it is
923 * guaranteed that this function will never run twice in parallel for
924 * the same PM domain, so it is not necessary to use locking here.
926 genpd_sync_poweron(genpd);
927 genpd->suspended_count--;
929 if (genpd->dev_ops.stop && genpd->dev_ops.start)
930 ret = pm_runtime_force_resume(dev);
932 return ret;
936 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
937 * @dev: Device to freeze.
939 * Carry out a late freeze of a device under the assumption that its
940 * pm_domain field points to the domain member of an object of type
941 * struct generic_pm_domain representing a power domain consisting of I/O
942 * devices.
944 static int pm_genpd_freeze_noirq(struct device *dev)
946 struct generic_pm_domain *genpd;
947 int ret = 0;
949 dev_dbg(dev, "%s()\n", __func__);
951 genpd = dev_to_genpd(dev);
952 if (IS_ERR(genpd))
953 return -EINVAL;
955 if (genpd->dev_ops.stop && genpd->dev_ops.start)
956 ret = pm_runtime_force_suspend(dev);
958 return ret;
962 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
963 * @dev: Device to thaw.
965 * Start the device, unless power has been removed from the domain already
966 * before the system transition.
968 static int pm_genpd_thaw_noirq(struct device *dev)
970 struct generic_pm_domain *genpd;
971 int ret = 0;
973 dev_dbg(dev, "%s()\n", __func__);
975 genpd = dev_to_genpd(dev);
976 if (IS_ERR(genpd))
977 return -EINVAL;
979 if (genpd->dev_ops.stop && genpd->dev_ops.start)
980 ret = pm_runtime_force_resume(dev);
982 return ret;
986 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
987 * @dev: Device to resume.
989 * Make sure the domain will be in the same power state as before the
990 * hibernation the system is resuming from and start the device if necessary.
992 static int pm_genpd_restore_noirq(struct device *dev)
994 struct generic_pm_domain *genpd;
995 int ret = 0;
997 dev_dbg(dev, "%s()\n", __func__);
999 genpd = dev_to_genpd(dev);
1000 if (IS_ERR(genpd))
1001 return -EINVAL;
1004 * Since all of the "noirq" callbacks are executed sequentially, it is
1005 * guaranteed that this function will never run twice in parallel for
1006 * the same PM domain, so it is not necessary to use locking here.
1008 * At this point suspended_count == 0 means we are being run for the
1009 * first time for the given domain in the present cycle.
1011 if (genpd->suspended_count++ == 0)
1013 * The boot kernel might put the domain into arbitrary state,
1014 * so make it appear as powered off to genpd_sync_poweron(),
1015 * so that it tries to power it on in case it was really off.
1017 genpd->status = GPD_STATE_POWER_OFF;
1019 genpd_sync_poweron(genpd);
1021 if (genpd->dev_ops.stop && genpd->dev_ops.start)
1022 ret = pm_runtime_force_resume(dev);
1024 return ret;
1028 * pm_genpd_complete - Complete power transition of a device in a power domain.
1029 * @dev: Device to complete the transition of.
1031 * Complete a power transition of a device (during a system-wide power
1032 * transition) under the assumption that its pm_domain field points to the
1033 * domain member of an object of type struct generic_pm_domain representing
1034 * a power domain consisting of I/O devices.
1036 static void pm_genpd_complete(struct device *dev)
1038 struct generic_pm_domain *genpd;
1040 dev_dbg(dev, "%s()\n", __func__);
1042 genpd = dev_to_genpd(dev);
1043 if (IS_ERR(genpd))
1044 return;
1046 pm_generic_complete(dev);
1048 genpd_lock(genpd);
1050 genpd->prepared_count--;
1051 if (!genpd->prepared_count)
1052 genpd_queue_power_off_work(genpd);
1054 genpd_unlock(genpd);
1058 * genpd_syscore_switch - Switch power during system core suspend or resume.
1059 * @dev: Device that normally is marked as "always on" to switch power for.
1061 * This routine may only be called during the system core (syscore) suspend or
1062 * resume phase for devices whose "always on" flags are set.
1064 static void genpd_syscore_switch(struct device *dev, bool suspend)
1066 struct generic_pm_domain *genpd;
1068 genpd = dev_to_genpd(dev);
1069 if (!pm_genpd_present(genpd))
1070 return;
1072 if (suspend) {
1073 genpd->suspended_count++;
1074 genpd_sync_poweroff(genpd);
1075 } else {
1076 genpd_sync_poweron(genpd);
1077 genpd->suspended_count--;
1081 void pm_genpd_syscore_poweroff(struct device *dev)
1083 genpd_syscore_switch(dev, true);
1085 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1087 void pm_genpd_syscore_poweron(struct device *dev)
1089 genpd_syscore_switch(dev, false);
1091 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
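/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * clocksource/clockevent driver that must keep running across syscore
 * suspend might pair these helpers in its struct syscore_ops callbacks:
 *
 *	static int my_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(my_timer_dev);
 *		return 0;
 *	}
 *
 *	static void my_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(my_timer_dev);
 *	}
 */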
1093 #else /* !CONFIG_PM_SLEEP */
1095 #define pm_genpd_prepare NULL
1096 #define pm_genpd_suspend_noirq NULL
1097 #define pm_genpd_resume_noirq NULL
1098 #define pm_genpd_freeze_noirq NULL
1099 #define pm_genpd_thaw_noirq NULL
1100 #define pm_genpd_restore_noirq NULL
1101 #define pm_genpd_complete NULL
1103 #endif /* CONFIG_PM_SLEEP */
1105 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1106 struct generic_pm_domain *genpd,
1107 struct gpd_timing_data *td)
1109 struct generic_pm_domain_data *gpd_data;
1110 int ret;
1112 ret = dev_pm_get_subsys_data(dev);
1113 if (ret)
1114 return ERR_PTR(ret);
1116 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1117 if (!gpd_data) {
1118 ret = -ENOMEM;
1119 goto err_put;
1122 if (td)
1123 gpd_data->td = *td;
1125 gpd_data->base.dev = dev;
1126 gpd_data->td.constraint_changed = true;
1127 gpd_data->td.effective_constraint_ns = -1;
1128 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1130 spin_lock_irq(&dev->power.lock);
1132 if (dev->power.subsys_data->domain_data) {
1133 ret = -EINVAL;
1134 goto err_free;
1137 dev->power.subsys_data->domain_data = &gpd_data->base;
1139 spin_unlock_irq(&dev->power.lock);
1141 dev_pm_domain_set(dev, &genpd->domain);
1143 return gpd_data;
1145 err_free:
1146 spin_unlock_irq(&dev->power.lock);
1147 kfree(gpd_data);
1148 err_put:
1149 dev_pm_put_subsys_data(dev);
1150 return ERR_PTR(ret);
1153 static void genpd_free_dev_data(struct device *dev,
1154 struct generic_pm_domain_data *gpd_data)
1156 dev_pm_domain_set(dev, NULL);
1158 spin_lock_irq(&dev->power.lock);
1160 dev->power.subsys_data->domain_data = NULL;
1162 spin_unlock_irq(&dev->power.lock);
1164 kfree(gpd_data);
1165 dev_pm_put_subsys_data(dev);
1168 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1169 struct gpd_timing_data *td)
1171 struct generic_pm_domain_data *gpd_data;
1172 int ret = 0;
1174 dev_dbg(dev, "%s()\n", __func__);
1176 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1177 return -EINVAL;
1179 gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1180 if (IS_ERR(gpd_data))
1181 return PTR_ERR(gpd_data);
1183 genpd_lock(genpd);
1185 if (genpd->prepared_count > 0) {
1186 ret = -EAGAIN;
1187 goto out;
1190 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1191 if (ret)
1192 goto out;
1194 genpd->device_count++;
1195 genpd->max_off_time_changed = true;
1197 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1199 out:
1200 genpd_unlock(genpd);
1202 if (ret)
1203 genpd_free_dev_data(dev, gpd_data);
1204 else
1205 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1207 return ret;
1211 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1212 * @genpd: PM domain to add the device to.
1213 * @dev: Device to be added.
1214 * @td: Set of PM QoS timing parameters to attach to the device.
1216 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1217 struct gpd_timing_data *td)
1219 int ret;
1221 mutex_lock(&gpd_list_lock);
1222 ret = genpd_add_device(genpd, dev, td);
1223 mutex_unlock(&gpd_list_lock);
1225 return ret;
1227 EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
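/*
 * Illustrative sketch (hypothetical domain/device names): once a domain has
 * been initialized, platform code can add a device to it, passing NULL
 * timing data to start from the defaults set up in genpd_alloc_dev_data():
 *
 *	ret = __pm_genpd_add_device(&my_pd, &pdev->dev, NULL);
 */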
1229 static int genpd_remove_device(struct generic_pm_domain *genpd,
1230 struct device *dev)
1232 struct generic_pm_domain_data *gpd_data;
1233 struct pm_domain_data *pdd;
1234 int ret = 0;
1236 dev_dbg(dev, "%s()\n", __func__);
1238 pdd = dev->power.subsys_data->domain_data;
1239 gpd_data = to_gpd_data(pdd);
1240 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1242 genpd_lock(genpd);
1244 if (genpd->prepared_count > 0) {
1245 ret = -EAGAIN;
1246 goto out;
1249 genpd->device_count--;
1250 genpd->max_off_time_changed = true;
1252 if (genpd->detach_dev)
1253 genpd->detach_dev(genpd, dev);
1255 list_del_init(&pdd->list_node);
1257 genpd_unlock(genpd);
1259 genpd_free_dev_data(dev, gpd_data);
1261 return 0;
1263 out:
1264 genpd_unlock(genpd);
1265 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1267 return ret;
1271 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1272 * @genpd: PM domain to remove the device from.
1273 * @dev: Device to be removed.
1275 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1276 struct device *dev)
1278 if (!genpd || genpd != genpd_lookup_dev(dev))
1279 return -EINVAL;
1281 return genpd_remove_device(genpd, dev);
1283 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1285 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1286 struct generic_pm_domain *subdomain)
1288 struct gpd_link *link, *itr;
1289 int ret = 0;
1291 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1292 || genpd == subdomain)
1293 return -EINVAL;
1296 * If the domain can be powered on/off in an IRQ safe
1297 * context, ensure that the subdomain can also be
1298 * powered on/off in that context.
1300 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1301 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1302 genpd->name, subdomain->name);
1303 return -EINVAL;
1306 link = kzalloc(sizeof(*link), GFP_KERNEL);
1307 if (!link)
1308 return -ENOMEM;
1310 genpd_lock(subdomain);
1311 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1313 if (genpd->status == GPD_STATE_POWER_OFF
1314 && subdomain->status != GPD_STATE_POWER_OFF) {
1315 ret = -EINVAL;
1316 goto out;
1319 list_for_each_entry(itr, &genpd->master_links, master_node) {
1320 if (itr->slave == subdomain && itr->master == genpd) {
1321 ret = -EINVAL;
1322 goto out;
1326 link->master = genpd;
1327 list_add_tail(&link->master_node, &genpd->master_links);
1328 link->slave = subdomain;
1329 list_add_tail(&link->slave_node, &subdomain->slave_links);
1330 if (subdomain->status != GPD_STATE_POWER_OFF)
1331 genpd_sd_counter_inc(genpd);
1333 out:
1334 genpd_unlock(genpd);
1335 genpd_unlock(subdomain);
1336 if (ret)
1337 kfree(link);
1338 return ret;
1342 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1343 * @genpd: Master PM domain to add the subdomain to.
1344 * @subdomain: Subdomain to be added.
1346 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1347 struct generic_pm_domain *subdomain)
1349 int ret;
1351 mutex_lock(&gpd_list_lock);
1352 ret = genpd_add_subdomain(genpd, subdomain);
1353 mutex_unlock(&gpd_list_lock);
1355 return ret;
1357 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
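/*
 * Illustrative sketch (hypothetical domains): making one initialized domain
 * a subdomain of another, e.g. a per-IP domain inside a larger power island:
 *
 *	ret = pm_genpd_add_subdomain(&soc_island_pd, &ip_block_pd);
 */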
1360 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1361 * @genpd: Master PM domain to remove the subdomain from.
1362 * @subdomain: Subdomain to be removed.
1364 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1365 struct generic_pm_domain *subdomain)
1367 struct gpd_link *link;
1368 int ret = -EINVAL;
1370 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1371 return -EINVAL;
1373 genpd_lock(subdomain);
1374 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1376 if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1377 pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1378 subdomain->name);
1379 ret = -EBUSY;
1380 goto out;
1383 list_for_each_entry(link, &genpd->master_links, master_node) {
1384 if (link->slave != subdomain)
1385 continue;
1387 list_del(&link->master_node);
1388 list_del(&link->slave_node);
1389 kfree(link);
1390 if (subdomain->status != GPD_STATE_POWER_OFF)
1391 genpd_sd_counter_dec(genpd);
1393 ret = 0;
1394 break;
1397 out:
1398 genpd_unlock(genpd);
1399 genpd_unlock(subdomain);
1401 return ret;
1403 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1405 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1407 struct genpd_power_state *state;
1409 state = kzalloc(sizeof(*state), GFP_KERNEL);
1410 if (!state)
1411 return -ENOMEM;
1413 genpd->states = state;
1414 genpd->state_count = 1;
1415 genpd->free = state;
1417 return 0;
1420 static void genpd_lock_init(struct generic_pm_domain *genpd)
1422 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1423 spin_lock_init(&genpd->slock);
1424 genpd->lock_ops = &genpd_spin_ops;
1425 } else {
1426 mutex_init(&genpd->mlock);
1427 genpd->lock_ops = &genpd_mtx_ops;
1432 * pm_genpd_init - Initialize a generic I/O PM domain object.
1433 * @genpd: PM domain object to initialize.
1434 * @gov: PM domain governor to associate with the domain (may be NULL).
1435 * @is_off: Initial power state of the domain (true if it starts powered off).
1437 * Returns 0 on successful initialization, else a negative error code.
1439 int pm_genpd_init(struct generic_pm_domain *genpd,
1440 struct dev_power_governor *gov, bool is_off)
1442 int ret;
1444 if (IS_ERR_OR_NULL(genpd))
1445 return -EINVAL;
1447 INIT_LIST_HEAD(&genpd->master_links);
1448 INIT_LIST_HEAD(&genpd->slave_links);
1449 INIT_LIST_HEAD(&genpd->dev_list);
1450 genpd_lock_init(genpd);
1451 genpd->gov = gov;
1452 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1453 atomic_set(&genpd->sd_count, 0);
1454 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1455 genpd->device_count = 0;
1456 genpd->max_off_time_ns = -1;
1457 genpd->max_off_time_changed = true;
1458 genpd->provider = NULL;
1459 genpd->has_provider = false;
1460 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1461 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1462 genpd->domain.ops.prepare = pm_genpd_prepare;
1463 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1464 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1465 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1466 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1467 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1468 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1469 genpd->domain.ops.complete = pm_genpd_complete;
1471 if (genpd->flags & GENPD_FLAG_PM_CLK) {
1472 genpd->dev_ops.stop = pm_clk_suspend;
1473 genpd->dev_ops.start = pm_clk_resume;
1476 /* Use only one "off" state if there were no states declared */
1477 if (genpd->state_count == 0) {
1478 ret = genpd_set_default_power_state(genpd);
1479 if (ret)
1480 return ret;
1483 mutex_lock(&gpd_list_lock);
1484 list_add(&genpd->gpd_list_node, &gpd_list);
1485 mutex_unlock(&gpd_list_lock);
1487 return 0;
1489 EXPORT_SYMBOL_GPL(pm_genpd_init);
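/*
 * Illustrative sketch (hypothetical callbacks and names): a SoC power
 * controller driver typically fills in its callbacks and flags before
 * registering the domain; the third argument selects the initial state
 * (true means the domain starts powered off):
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name		= "my_pd",
 *		.power_on	= my_pd_power_on,
 *		.power_off	= my_pd_power_off,
 *		.flags		= GENPD_FLAG_PM_CLK,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 */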
1491 static int genpd_remove(struct generic_pm_domain *genpd)
1493 struct gpd_link *l, *link;
1495 if (IS_ERR_OR_NULL(genpd))
1496 return -EINVAL;
1498 genpd_lock(genpd);
1500 if (genpd->has_provider) {
1501 genpd_unlock(genpd);
1502 pr_err("Provider present, unable to remove %s\n", genpd->name);
1503 return -EBUSY;
1506 if (!list_empty(&genpd->master_links) || genpd->device_count) {
1507 genpd_unlock(genpd);
1508 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1509 return -EBUSY;
1512 list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1513 list_del(&link->master_node);
1514 list_del(&link->slave_node);
1515 kfree(link);
1518 list_del(&genpd->gpd_list_node);
1519 genpd_unlock(genpd);
1520 cancel_work_sync(&genpd->power_off_work);
1521 kfree(genpd->free);
1522 pr_debug("%s: removed %s\n", __func__, genpd->name);
1524 return 0;
1528 * pm_genpd_remove - Remove a generic I/O PM domain
1529 * @genpd: Pointer to PM domain that is to be removed.
1531 * To remove the PM domain, this function:
1532 * - Removes the PM domain as a subdomain to any parent domains,
1533 * if it was added.
1534 * - Removes the PM domain from the list of registered PM domains.
1536 * The PM domain will only be removed if the associated provider has
1537 * been removed, it is not a parent to any other PM domain, and it has
1538 * no devices associated with it.
1540 int pm_genpd_remove(struct generic_pm_domain *genpd)
1542 int ret;
1544 mutex_lock(&gpd_list_lock);
1545 ret = genpd_remove(genpd);
1546 mutex_unlock(&gpd_list_lock);
1548 return ret;
1550 EXPORT_SYMBOL_GPL(pm_genpd_remove);
1552 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1554 typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
1555 void *data);
1558 * Device Tree based PM domain providers.
1560 * The code below implements generic device tree based PM domain providers that
1561 * bind device tree nodes with generic PM domains registered in the system.
1563 * Any driver that registers generic PM domains and needs to support binding of
1564 * devices to these domains is supposed to register a PM domain provider, which
1565 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1567 * Two simple mapping functions have been provided for convenience:
1568 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1569 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1570 * index.
1574 * struct of_genpd_provider - PM domain provider registration structure
1575 * @link: Entry in global list of PM domain providers
1576 * @node: Pointer to device tree node of PM domain provider
1577 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1578 * into a PM domain.
1579 * @data: context pointer to be passed into @xlate callback
1581 struct of_genpd_provider {
1582 struct list_head link;
1583 struct device_node *node;
1584 genpd_xlate_t xlate;
1585 void *data;
1588 /* List of registered PM domain providers. */
1589 static LIST_HEAD(of_genpd_providers);
1590 /* Mutex to protect the list above. */
1591 static DEFINE_MUTEX(of_genpd_mutex);
1594 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1595 * @genpdspec: OF phandle args to map into a PM domain
1596 * @data: xlate function private data - pointer to struct generic_pm_domain
1598 * This is a generic xlate function that can be used to model PM domains that
1599 * have their own device tree nodes. The private data of the xlate function
1600 * must be a valid pointer to a struct generic_pm_domain.
1602 static struct generic_pm_domain *genpd_xlate_simple(
1603 struct of_phandle_args *genpdspec,
1604 void *data)
1606 if (genpdspec->args_count != 0)
1607 return ERR_PTR(-EINVAL);
1608 return data;
1612 * genpd_xlate_onecell() - Xlate function using a single index.
1613 * @genpdspec: OF phandle args to map into a PM domain
1614 * @data: xlate function private data - pointer to struct genpd_onecell_data
1616 * This is a generic xlate function that can be used to model simple PM domain
1617 * controllers that have one device tree node and provide multiple PM domains.
1618 * A single cell is used as an index into an array of PM domains specified in
1619 * the genpd_onecell_data struct when registering the provider.
1621 static struct generic_pm_domain *genpd_xlate_onecell(
1622 struct of_phandle_args *genpdspec,
1623 void *data)
1625 struct genpd_onecell_data *genpd_data = data;
1626 unsigned int idx = genpdspec->args[0];
1628 if (genpdspec->args_count != 1)
1629 return ERR_PTR(-EINVAL);
1631 if (idx >= genpd_data->num_domains) {
1632 pr_err("%s: invalid domain index %u\n", __func__, idx);
1633 return ERR_PTR(-EINVAL);
1636 if (!genpd_data->domains[idx])
1637 return ERR_PTR(-ENOENT);
1639 return genpd_data->domains[idx];
1643 * genpd_add_provider() - Register a PM domain provider for a node
1644 * @np: Device node pointer associated with the PM domain provider.
1645 * @xlate: Callback for decoding PM domain from phandle arguments.
1646 * @data: Context pointer for @xlate callback.
1648 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1649 void *data)
1651 struct of_genpd_provider *cp;
1653 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1654 if (!cp)
1655 return -ENOMEM;
1657 cp->node = of_node_get(np);
1658 cp->data = data;
1659 cp->xlate = xlate;
1661 mutex_lock(&of_genpd_mutex);
1662 list_add(&cp->link, &of_genpd_providers);
1663 mutex_unlock(&of_genpd_mutex);
1664 pr_debug("Added domain provider from %s\n", np->full_name);
1666 return 0;
1670 * of_genpd_add_provider_simple() - Register a simple PM domain provider
1671 * @np: Device node pointer associated with the PM domain provider.
1672 * @genpd: Pointer to PM domain associated with the PM domain provider.
1674 int of_genpd_add_provider_simple(struct device_node *np,
1675 struct generic_pm_domain *genpd)
1677 int ret = -EINVAL;
1679 if (!np || !genpd)
1680 return -EINVAL;
1682 mutex_lock(&gpd_list_lock);
1684 if (pm_genpd_present(genpd))
1685 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
1687 if (!ret) {
1688 genpd->provider = &np->fwnode;
1689 genpd->has_provider = true;
1692 mutex_unlock(&gpd_list_lock);
1694 return ret;
1696 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
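/*
 * Illustrative sketch (hypothetical driver): after pm_genpd_init() has been
 * called for the domain, a provider driver's probe() can expose it to DT
 * consumers through its own node:
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 */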
1699 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
1700 * @np: Device node pointer associated with the PM domain provider.
1701 * @data: Pointer to the data associated with the PM domain provider.
1703 int of_genpd_add_provider_onecell(struct device_node *np,
1704 struct genpd_onecell_data *data)
1706 unsigned int i;
1707 int ret = -EINVAL;
1709 if (!np || !data)
1710 return -EINVAL;
1712 mutex_lock(&gpd_list_lock);
1714 for (i = 0; i < data->num_domains; i++) {
1715 if (!data->domains[i])
1716 continue;
1717 if (!pm_genpd_present(data->domains[i]))
1718 goto error;
1720 data->domains[i]->provider = &np->fwnode;
1721 data->domains[i]->has_provider = true;
1724 ret = genpd_add_provider(np, genpd_xlate_onecell, data);
1725 if (ret < 0)
1726 goto error;
1728 mutex_unlock(&gpd_list_lock);
1730 return 0;
1732 error:
1733 while (i--) {
1734 if (!data->domains[i])
1735 continue;
1736 data->domains[i]->provider = NULL;
1737 data->domains[i]->has_provider = false;
1740 mutex_unlock(&gpd_list_lock);
1742 return ret;
1744 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
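/*
 * Illustrative sketch (hypothetical data): a controller exposing several
 * domains from a single DT node fills a struct genpd_onecell_data with an
 * array indexed by the one specifier cell:
 *
 *	static struct generic_pm_domain *my_domains[8];
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains	= my_domains,
 *		.num_domains	= ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &my_onecell_data);
 */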
1747 * of_genpd_del_provider() - Remove a previously registered PM domain provider
1748 * @np: Device node pointer associated with the PM domain provider
1750 void of_genpd_del_provider(struct device_node *np)
1752 struct of_genpd_provider *cp;
1753 struct generic_pm_domain *gpd;
1755 mutex_lock(&gpd_list_lock);
1756 mutex_lock(&of_genpd_mutex);
1757 list_for_each_entry(cp, &of_genpd_providers, link) {
1758 if (cp->node == np) {
1760 * For each PM domain associated with the
1761 * provider, set the 'has_provider' to false
1762 * so that the PM domain can be safely removed.
1764 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
1765 if (gpd->provider == &np->fwnode)
1766 gpd->has_provider = false;
1768 list_del(&cp->link);
1769 of_node_put(cp->node);
1770 kfree(cp);
1771 break;
1774 mutex_unlock(&of_genpd_mutex);
1775 mutex_unlock(&gpd_list_lock);
1777 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1780 * genpd_get_from_provider() - Look-up PM domain
1781 * @genpdspec: OF phandle args to use for look-up
1783 * Looks for a PM domain provider under the node specified by @genpdspec and if
1784 * found, uses the provider's xlate function to map the phandle args to a
1785 * PM domain.
1787 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
1788 * on failure.
1790 static struct generic_pm_domain *genpd_get_from_provider(
1791 struct of_phandle_args *genpdspec)
1793 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
1794 struct of_genpd_provider *provider;
1796 if (!genpdspec)
1797 return ERR_PTR(-EINVAL);
1799 mutex_lock(&of_genpd_mutex);
1801 /* Check if we have such a provider in our array */
1802 list_for_each_entry(provider, &of_genpd_providers, link) {
1803 if (provider->node == genpdspec->np)
1804 genpd = provider->xlate(genpdspec, provider->data);
1805 if (!IS_ERR(genpd))
1806 break;
1809 mutex_unlock(&of_genpd_mutex);
1811 return genpd;
1815 * of_genpd_add_device() - Add a device to an I/O PM domain
1816 * @genpdspec: OF phandle args to use for look-up PM domain
1817 * @dev: Device to be added.
1819 * Looks-up an I/O PM domain based upon phandle args provided and adds
1820 * the device to the PM domain. Returns a negative error code on failure.
1822 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
1824 struct generic_pm_domain *genpd;
1825 int ret;
1827 mutex_lock(&gpd_list_lock);
1829 genpd = genpd_get_from_provider(genpdspec);
1830 if (IS_ERR(genpd)) {
1831 ret = PTR_ERR(genpd);
1832 goto out;
1835 ret = genpd_add_device(genpd, dev, NULL);
1837 out:
1838 mutex_unlock(&gpd_list_lock);
1840 return ret;
1842 EXPORT_SYMBOL_GPL(of_genpd_add_device);
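/*
 * Illustrative sketch: code that has already resolved a "power-domains"
 * specifier for a device (compare genpd_dev_pm_attach() below) can attach
 * the device directly:
 *
 *	struct of_phandle_args pd_args;
 *
 *	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (!ret) {
 *		ret = of_genpd_add_device(&pd_args, dev);
 *		of_node_put(pd_args.np);
 *	}
 */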
1845 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1846 * @parent_spec: OF phandle args to use for parent PM domain look-up
1847 * @subdomain_spec: OF phandle args to use for subdomain look-up
1849 * Looks-up a parent PM domain and subdomain based upon phandle args
1850 * provided and adds the subdomain to the parent PM domain. Returns a
1851 * negative error code on failure.
1853 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
1854 struct of_phandle_args *subdomain_spec)
1856 struct generic_pm_domain *parent, *subdomain;
1857 int ret;
1859 mutex_lock(&gpd_list_lock);
1861 parent = genpd_get_from_provider(parent_spec);
1862 if (IS_ERR(parent)) {
1863 ret = PTR_ERR(parent);
1864 goto out;
1867 subdomain = genpd_get_from_provider(subdomain_spec);
1868 if (IS_ERR(subdomain)) {
1869 ret = PTR_ERR(subdomain);
1870 goto out;
1873 ret = genpd_add_subdomain(parent, subdomain);
1875 out:
1876 mutex_unlock(&gpd_list_lock);
1878 return ret;
1880 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
1883 * of_genpd_remove_last - Remove the last PM domain registered for a provider
1884 * @np: Pointer to the device node associated with the provider
1886 * Find the last PM domain that was added by a particular provider and
1887 * remove this PM domain from the list of PM domains. The provider is
1888 * identified by the device node that is passed. The PM domain will only
1889 * be removed if the provider associated with the domain has been
1890 * removed.
1892 * Returns a valid pointer to struct generic_pm_domain on success or
1893 * ERR_PTR() on failure.
1895 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
1897 struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT);
1898 int ret;
1900 if (IS_ERR_OR_NULL(np))
1901 return ERR_PTR(-EINVAL);
1903 mutex_lock(&gpd_list_lock);
1904 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1905 if (gpd->provider == &np->fwnode) {
1906 ret = genpd_remove(gpd);
1907 genpd = ret ? ERR_PTR(ret) : gpd;
1908 break;
1911 mutex_unlock(&gpd_list_lock);
1913 return genpd;
1915 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
1918 * genpd_dev_pm_detach - Detach a device from its PM domain.
1919 * @dev: Device to detach.
1920 * @power_off: Currently not used
1922 * Try to locate a corresponding generic PM domain, which the device was
1923 * attached to previously. If such is found, the device is detached from it.
1925 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
1927 struct generic_pm_domain *pd;
1928 unsigned int i;
1929 int ret = 0;
1931 pd = dev_to_genpd(dev);
1932 if (IS_ERR(pd))
1933 return;
1935 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
1937 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1938 ret = genpd_remove_device(pd, dev);
1939 if (ret != -EAGAIN)
1940 break;
1942 mdelay(i);
1943 cond_resched();
1946 if (ret < 0) {
1947 dev_err(dev, "failed to remove from PM domain %s: %d",
1948 pd->name, ret);
1949 return;
1952 /* Check if PM domain can be powered off after removing this device. */
1953 genpd_queue_power_off_work(pd);
1956 static void genpd_dev_pm_sync(struct device *dev)
1958 struct generic_pm_domain *pd;
1960 pd = dev_to_genpd(dev);
1961 if (IS_ERR(pd))
1962 return;
1964 genpd_queue_power_off_work(pd);
1968 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
1969 * @dev: Device to attach.
1971 * Parse device's OF node to find a PM domain specifier. If such is found,
1972 * attaches the device to retrieved pm_domain ops.
1974 * Both generic and legacy Samsung-specific DT bindings are supported to keep
1975 * backwards compatibility with existing DTBs.
1977 * Returns 0 when the PM domain was successfully attached, or a negative
1978 * error code otherwise. Note that if a power-domain exists for the device,
1979 * but it cannot be found or turned on, -EPROBE_DEFER is returned to ensure
1980 * that the device is not probed and to retry again later.
1982 int genpd_dev_pm_attach(struct device *dev)
1984 struct of_phandle_args pd_args;
1985 struct generic_pm_domain *pd;
1986 unsigned int i;
1987 int ret;
1989 if (!dev->of_node)
1990 return -ENODEV;
1992 if (dev->pm_domain)
1993 return -EEXIST;
1995 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
1996 "#power-domain-cells", 0, &pd_args);
1997 if (ret < 0) {
1998 if (ret != -ENOENT)
1999 return ret;
2002 * Try legacy Samsung-specific bindings
2003 * (for backwards compatibility of DT ABI)
2005 pd_args.args_count = 0;
2006 pd_args.np = of_parse_phandle(dev->of_node,
2007 "samsung,power-domain", 0);
2008 if (!pd_args.np)
2009 return -ENOENT;
2012 mutex_lock(&gpd_list_lock);
2013 pd = genpd_get_from_provider(&pd_args);
2014 of_node_put(pd_args.np);
2015 if (IS_ERR(pd)) {
2016 mutex_unlock(&gpd_list_lock);
2017 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2018 __func__, PTR_ERR(pd));
2019 return -EPROBE_DEFER;
2022 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2024 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2025 ret = genpd_add_device(pd, dev, NULL);
2026 if (ret != -EAGAIN)
2027 break;
2029 mdelay(i);
2030 cond_resched();
2032 mutex_unlock(&gpd_list_lock);
2034 if (ret < 0) {
2035 if (ret != -EPROBE_DEFER)
2036 dev_err(dev, "failed to add to PM domain %s: %d",
2037 pd->name, ret);
2038 goto out;
2041 dev->pm_domain->detach = genpd_dev_pm_detach;
2042 dev->pm_domain->sync = genpd_dev_pm_sync;
2044 genpd_lock(pd);
2045 ret = genpd_poweron(pd, 0);
2046 genpd_unlock(pd);
2047 out:
2048 return ret ? -EPROBE_DEFER : 0;
2050 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
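/*
 * Illustrative device tree fragment (hypothetical nodes) for the generic
 * binding parsed above: a provider with #power-domain-cells = <0> and a
 * consumer referencing it via "power-domains":
 *
 *	power: power-controller@12340000 {
 *		compatible = "vendor,soc-power-controller";
 *		#power-domain-cells = <0>;
 *	};
 *
 *	serial@12350000 {
 *		compatible = "vendor,soc-uart";
 *		power-domains = <&power>;
 *	};
 */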
2052 static const struct of_device_id idle_state_match[] = {
2053 { .compatible = "domain-idle-state", },
2057 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2058 struct device_node *state_node)
2060 int err;
2061 u32 residency;
2062 u32 entry_latency, exit_latency;
2063 const struct of_device_id *match_id;
2065 match_id = of_match_node(idle_state_match, state_node);
2066 if (!match_id)
2067 return -EINVAL;
2069 err = of_property_read_u32(state_node, "entry-latency-us",
2070 &entry_latency);
2071 if (err) {
2072 pr_debug(" * %s missing entry-latency-us property\n",
2073 state_node->full_name);
2074 return -EINVAL;
2077 err = of_property_read_u32(state_node, "exit-latency-us",
2078 &exit_latency);
2079 if (err) {
2080 pr_debug(" * %s missing exit-latency-us property\n",
2081 state_node->full_name);
2082 return -EINVAL;
2085 err = of_property_read_u32(state_node, "min-residency-us", &residency);
2086 if (!err)
2087 genpd_state->residency_ns = 1000 * residency;
2089 genpd_state->power_on_latency_ns = 1000 * exit_latency;
2090 genpd_state->power_off_latency_ns = 1000 * entry_latency;
2091 genpd_state->fwnode = &state_node->fwnode;
2093 return 0;
2097 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2099 * @dn: The genpd device node
2100 * @states: The pointer to which the state array will be saved.
2101 * @n: The count of elements in the array returned from this function.
2103 * Returns the device states parsed from the OF node. The memory for the
2104 * states is allocated by this function, and it is the caller's
2105 * responsibility to free it after use.
2107 int of_genpd_parse_idle_states(struct device_node *dn,
2108 struct genpd_power_state **states, int *n)
2110 struct genpd_power_state *st;
2111 struct device_node *np;
2112 int i = 0;
2113 int err, ret;
2114 int count;
2115 struct of_phandle_iterator it;
2117 count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2118 if (count <= 0)
2119 return -EINVAL;
2121 st = kcalloc(count, sizeof(*st), GFP_KERNEL);
2122 if (!st)
2123 return -ENOMEM;
2125 /* Loop over the phandles until all the requested entries are found */
2126 of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
2127 np = it.node;
2128 ret = genpd_parse_state(&st[i++], np);
2129 if (ret) {
2130 pr_err("Parsing idle state node %s failed with err %d\n",
2131 	np->full_name, ret);
2133 of_node_put(np);
2134 kfree(st);
2135 return ret;
2139 *n = count;
2140 *states = st;
2142 return 0;
2144 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
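/*
 * Illustrative sketch (hypothetical labels and names): a provider node @np
 * carrying a domain-idle-states = <&PD_RET> phandle list, where the
 * referenced state node uses the properties parsed above:
 *
 *	PD_RET: pd-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <100>;
 *		exit-latency-us = <200>;
 *		min-residency-us = <1000>;
 *	};
 *
 * and in the provider driver, before pm_genpd_init():
 *
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (!ret) {
 *		my_pd.states = states;
 *		my_pd.state_count = nr_states;
 *	}
 */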
2146 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2149 /*** debugfs support ***/
2151 #ifdef CONFIG_DEBUG_FS
2152 #include <linux/pm.h>
2153 #include <linux/device.h>
2154 #include <linux/debugfs.h>
2155 #include <linux/seq_file.h>
2156 #include <linux/init.h>
2157 #include <linux/kobject.h>
2158 static struct dentry *pm_genpd_debugfs_dir;
2161 * TODO: This function is a slightly modified version of rtpm_status_show
2162 * from sysfs.c, so generalize it.
2164 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2166 static const char * const status_lookup[] = {
2167 [RPM_ACTIVE] = "active",
2168 [RPM_RESUMING] = "resuming",
2169 [RPM_SUSPENDED] = "suspended",
2170 [RPM_SUSPENDING] = "suspending"
2172 const char *p = "";
2174 if (dev->power.runtime_error)
2175 p = "error";
2176 else if (dev->power.disable_depth)
2177 p = "unsupported";
2178 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2179 p = status_lookup[dev->power.runtime_status];
2180 else
2181 WARN_ON(1);
2183 seq_puts(s, p);
2186 static int pm_genpd_summary_one(struct seq_file *s,
2187 struct generic_pm_domain *genpd)
2189 static const char * const status_lookup[] = {
2190 [GPD_STATE_ACTIVE] = "on",
2191 [GPD_STATE_POWER_OFF] = "off"
2193 struct pm_domain_data *pm_data;
2194 const char *kobj_path;
2195 struct gpd_link *link;
2196 char state[16];
2197 int ret;
2199 ret = genpd_lock_interruptible(genpd);
2200 if (ret)
2201 return -ERESTARTSYS;
2203 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2204 goto exit;
2205 if (genpd->status == GPD_STATE_POWER_OFF)
2206 snprintf(state, sizeof(state), "%s-%u",
2207 status_lookup[genpd->status], genpd->state_idx);
2208 else
2209 snprintf(state, sizeof(state), "%s",
2210 status_lookup[genpd->status]);
2211 seq_printf(s, "%-30s %-15s ", genpd->name, state);
2214 * Modifications on the list require holding locks on both
2215 * master and slave, so we are safe.
2216 * Also genpd->name is immutable.
2218 list_for_each_entry(link, &genpd->master_links, master_node) {
2219 seq_printf(s, "%s", link->slave->name);
2220 if (!list_is_last(&link->master_node, &genpd->master_links))
2221 seq_puts(s, ", ");
2224 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2225 kobj_path = kobject_get_path(&pm_data->dev->kobj,
2226 genpd_is_irq_safe(genpd) ?
2227 GFP_ATOMIC : GFP_KERNEL);
2228 if (kobj_path == NULL)
2229 continue;
2231 seq_printf(s, "\n    %-50s ", kobj_path);
2232 rtpm_status_str(s, pm_data->dev);
2233 kfree(kobj_path);
2236 seq_puts(s, "\n");
2237 exit:
2238 genpd_unlock(genpd);
2240 return 0;
2243 static int pm_genpd_summary_show(struct seq_file *s, void *data)
2245 struct generic_pm_domain *genpd;
2246 int ret = 0;
2248 seq_puts(s, "domain                          status          slaves\n");
2249 seq_puts(s, "    /device                                             runtime status\n");
2250 seq_puts(s, "----------------------------------------------------------------------\n");
2252 ret = mutex_lock_interruptible(&gpd_list_lock);
2253 if (ret)
2254 return -ERESTARTSYS;
2256 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2257 ret = pm_genpd_summary_one(s, genpd);
2258 if (ret)
2259 break;
2261 mutex_unlock(&gpd_list_lock);
2263 return ret;
2266 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2268 return single_open(file, pm_genpd_summary_show, NULL);
2271 static const struct file_operations pm_genpd_summary_fops = {
2272 .open = pm_genpd_summary_open,
2273 .read = seq_read,
2274 .llseek = seq_lseek,
2275 .release = single_release,
2278 static int __init pm_genpd_debug_init(void)
2280 struct dentry *d;
2282 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2284 if (!pm_genpd_debugfs_dir)
2285 return -ENOMEM;
2287 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2288 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2289 if (!d)
2290 return -ENOMEM;
2292 return 0;
2294 late_initcall(pm_genpd_debug_init);
2296 static void __exit pm_genpd_debug_exit(void)
2298 debugfs_remove_recursive(pm_genpd_debugfs_dir);
2300 __exitcall(pm_genpd_debug_exit);
2301 #endif /* CONFIG_DEBUG_FS */