arch/arm/mach-omap2/cpuidle44xx.c
/*
 * OMAP4 CPU idle Routines
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/cpu_pm.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/hardware/gic.h>

#include <mach/omap4-common.h>
#include <mach/omap-wakeupgen.h>

#include <plat/gpio.h>

#include "clockdomain.h"
#include "pm.h"
#include "prm.h"

#ifdef CONFIG_CPU_IDLE

#ifdef CONFIG_OMAP_ALLOW_OSWR
#define CPU_IDLE_ALLOW_OSWR	1
#else
#define CPU_IDLE_ALLOW_OSWR	0
#endif
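
/*
 * Power-state abbreviations used below: INA = inactive, CSWR =
 * closed-switch retention, OSWR = open-switch retention (domain logic
 * is lost, memory is retained).
 */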
/* C1 is a single-cpu C-state, it can be entered by each cpu independently */
/* C1 - CPUx WFI + MPU ON + CORE ON */
#define OMAP4_STATE_C1		0
/* C2 through C4 are shared C-states, both CPUs must agree to enter */
/* C2 - CPUx OFF + MPU INA + CORE INA */
#define OMAP4_STATE_C2		1
/* C3 - CPUx OFF + MPU CSWR + CORE OSWR */
#define OMAP4_STATE_C3		2
/* C4 - CPUx OFF + MPU OSWR + CORE OSWR */
#define OMAP4_STATE_C4		3

#define OMAP4_MAX_STATES	4
static bool disallow_smp_idle;
module_param(disallow_smp_idle, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disallow_smp_idle,
	"Don't enter idle if multiple cpus are active");

static bool skip_off;
module_param(skip_off, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(skip_off,
	"Do everything except actually enter the low power state (debugging)");

static bool keep_core_on;
module_param(keep_core_on, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(keep_core_on,
	"Prevent core powerdomain from entering any low power states (debugging)");

static bool keep_mpu_on;
module_param(keep_mpu_on, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(keep_mpu_on,
	"Prevent mpu powerdomain from entering any low power states (debugging)");

static int max_state;
module_param(max_state, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_state,
	"Select deepest power state allowed (0=any, 1=WFI, 2=INA, 3=CSWR, 4=OSWR)");

static int only_state;
module_param(only_state, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(only_state,
	"Select only power state allowed (0=any, 1=WFI, 2=INA, 3=CSWR, 4=OSWR)");
static const int omap4_poke_interrupt[2] = {
	OMAP44XX_IRQ_CPUIDLE_POKE0,
	OMAP44XX_IRQ_CPUIDLE_POKE1
};
struct omap4_processor_cx {
	u8 valid;
	u8 type;
	u32 exit_latency;
	u32 target_residency;
	u32 mpu_state;
	u32 mpu_logic_state;
	u32 core_state;
	u32 core_logic_state;
	const char *desc;
};

struct omap4_processor_cx omap4_power_states[OMAP4_MAX_STATES];
static struct powerdomain *mpu_pd, *cpu1_pd, *core_pd;
static struct omap4_processor_cx *omap4_idle_requested_cx[NR_CPUS];
static int omap4_idle_ready_count;
static DEFINE_SPINLOCK(omap4_idle_lock);
static struct clockdomain *cpu1_cd;
/*
 * Raw measured exit latency numbers (us):
 *	state	average	max
 *	C2	383	1068
 *	C3	641	1190
 *	C4	769	1323
 */
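
/*
 * Per-SoC C-state parameter tables.  exit_latency and target_residency
 * (both in us) are reported to the cpuidle governor, which compares
 * them against the predicted idle time when choosing a state.
 */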
static __initdata struct cpuidle_params omap443x_cpuidle_params_table[] = {
	/* C1 - CPUx WFI + MPU ON + CORE ON */
	{
		.exit_latency = 4,
		.target_residency = 4,
		.valid = 1,
	},
	/* C2 - CPUx OFF + MPU INA + CORE INA */
	{
		.exit_latency = 300,
		.target_residency = 300,
		.valid = 1,
	},
	/* C3 - CPUx OFF + MPU CSWR + CORE OSWR */
	{
		.exit_latency = 5000,
		.target_residency = 10000,
		.valid = 1,
	},
	/* C4 - CPUx OFF + MPU OSWR + CORE OSWR */
	{
		.exit_latency = 5200,
		.target_residency = 35000,
		.valid = CPU_IDLE_ALLOW_OSWR,
	},
};
static __initdata struct cpuidle_params omap446x_cpuidle_params_table[] = {
	/* C1 - CPUx WFI + MPU ON + CORE ON */
	{
		.exit_latency = 4,
		.target_residency = 4,
		.valid = 1,
	},
	/* C2 - CPUx OFF + MPU INA + CORE INA */
	{
		.exit_latency = 300,
		.target_residency = 1800,
		.valid = 1,
	},
	/* C3 - CPUx OFF + MPU CSWR + CORE OSWR */
	{
		.exit_latency = 1200,
		.target_residency = 4000,
		.valid = 1,
	},
	/* C4 - CPUx OFF + MPU OSWR + CORE OSWR */
	{
		.exit_latency = 1400,
		.target_residency = 4200,
		.valid = CPU_IDLE_ALLOW_OSWR,
	},
};
static __initdata struct cpuidle_params omap447x_cpuidle_params_table[] = {
	/* C1 - CPUx WFI + MPU ON + CORE ON */
	{
		.exit_latency = 4,
		.target_residency = 4,
		.valid = 1,
	},
	/* C2 - CPUx OFF + MPU INA + CORE INA */
	{
		.exit_latency = 500,
		.target_residency = 1200,
		.valid = 1,
	},
	/* C3 - CPUx OFF + MPU CSWR + CORE OSWR */
	{
		.exit_latency = 5300,
		.target_residency = 5300,
		.valid = 1,
	},
	/* C4 - CPUx OFF + MPU OSWR + CORE OSWR */
	{
		.exit_latency = 5500,
		.target_residency = 15000,
		.valid = CPU_IDLE_ALLOW_OSWR,
	},
};
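
/*
 * Record the C-state that was actually entered (possibly shallower than
 * the state the governor requested) so the idle statistics reported to
 * the governor stay accurate.
 */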
static void omap4_update_actual_state(struct cpuidle_device *dev,
		struct omap4_processor_cx *cx)
{
	int i;

	for (i = 0; i < dev->state_count; i++) {
		if (dev->states[i].driver_data == cx) {
			dev->last_state = &dev->states[i];
			return;
		}
	}
}
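
/*
 * Reading the GIC CPU interface highest priority pending register
 * returns the spurious interrupt ID (0x3FF) when no interrupt is
 * pending for this cpu.
 */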
static bool omap4_gic_interrupt_pending(void)
{
	void __iomem *gic_cpu = omap4_get_gic_cpu_base();

	return (__raw_readl(gic_cpu + GIC_CPU_HIGHPRI) != 0x3FF);
}
/**
 * omap4_wfi_until_interrupt
 *
 * wfi can sometimes return with no interrupts pending, for example on a
 * broadcast cache flush or tlb op.  This function will call wfi repeatedly
 * until an interrupt is actually pending.  Returning without looping would
 * cause very short idle times to be reported to the idle governor, messing
 * with repeating interrupt detection, and causing deep idle states to be
 * avoided.
 */
static void omap4_wfi_until_interrupt(void)
{
retry:
	omap_do_wfi();

	if (!omap4_gic_interrupt_pending())
		goto retry;
}
/**
 * omap4_idle_wait
 *
 * Similar to WFE, but can be woken by an interrupt even though interrupts
 * are masked.  An "event" is emulated by a per-cpu unused interrupt in the
 * GIC.  Returns false if the wake was caused by an interrupt, true if by
 * an "event".
 */
static bool omap4_idle_wait(void)
{
	int cpu = hard_smp_processor_id();
	void __iomem *gic_dist = omap4_get_gic_dist_base();
	u32 bit = BIT(omap4_poke_interrupt[cpu] % 32);
	u32 reg = (omap4_poke_interrupt[cpu] / 32) * 4;
	bool poked;

	/* Unmask the "event" interrupt */
	__raw_writel(bit, gic_dist + GIC_DIST_ENABLE_SET + reg);

	omap4_wfi_until_interrupt();

	/* Read the "event" interrupt pending bit */
	poked = __raw_readl(gic_dist + GIC_DIST_PENDING_SET + reg) & bit;

	/* Mask the "event" interrupt */
	__raw_writel(bit, gic_dist + GIC_DIST_ENABLE_CLEAR + reg);

	/* Clear the "event" if it fired */
	if (poked)
		__raw_writel(bit, gic_dist + GIC_DIST_PENDING_CLEAR + reg);

	return poked;
}
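
/*
 * omap4_idle_wait() pairs with omap4_poke_cpu(): whenever a cpu changes
 * the shared idle state it pokes the other cpus so they drop out of wfi
 * and re-examine the state under omap4_idle_lock.
 */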
/**
 * omap4_poke_cpu
 * @cpu: cpu to wake
 *
 * Trigger an "event" to wake a cpu from omap4_idle_wait().
 */
static void omap4_poke_cpu(int cpu)
{
	void __iomem *gic_dist = omap4_get_gic_dist_base();
	u32 bit = BIT(omap4_poke_interrupt[cpu] % 32);
	u32 reg = (omap4_poke_interrupt[cpu] / 32) * 4;

	__raw_writel(bit, gic_dist + GIC_DIST_PENDING_SET + reg);
}
/**
 * omap4_enter_idle_wfi
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Idle function for the C1 state, WFI on a single CPU.
 * Called with irqs off, returns with irqs on.
 * Returns the amount of time spent in the low power state.
 */
static int omap4_enter_idle_wfi(struct cpuidle_device *dev,
		struct cpuidle_state *state)
{
	ktime_t preidle, postidle;

	local_fiq_disable();

	preidle = ktime_get();

	omap4_wfi_until_interrupt();

	postidle = ktime_get();

	local_fiq_enable();
	local_irq_enable();

	omap4_update_actual_state(dev, &omap4_power_states[OMAP4_STATE_C1]);

	return ktime_to_us(ktime_sub(postidle, preidle));
}
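
/* True if every online cpu has published a requested C-state. */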
static inline bool omap4_all_cpus_idle(void)
{
	int i;

	assert_spin_locked(&omap4_idle_lock);

	for_each_online_cpu(i)
		if (omap4_idle_requested_cx[i] == NULL)
			return false;

	return true;
}
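
/*
 * Returns the shallowest C-state requested by any online cpu, i.e. the
 * deepest state the cluster as a whole can safely enter.
 */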
static inline struct omap4_processor_cx *omap4_get_idle_state(void)
{
	struct omap4_processor_cx *cx = NULL;
	int i;

	assert_spin_locked(&omap4_idle_lock);

	for_each_online_cpu(i)
		if (!cx || omap4_idle_requested_cx[i]->type < cx->type)
			cx = omap4_idle_requested_cx[i];

	return cx;
}
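
/* Wake all cpus other than @cpu out of omap4_idle_wait(). */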
static void omap4_cpu_poke_others(int cpu)
{
	int i;

	for_each_online_cpu(i)
		if (i != cpu)
			omap4_poke_cpu(i);
}
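
/*
 * Publish @cpu's requested C-state (NULL means not idle) and poke the
 * other cpus so they notice the change.
 */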
static void omap4_cpu_update_state(int cpu, struct omap4_processor_cx *cx)
{
	assert_spin_locked(&omap4_idle_lock);

	omap4_idle_requested_cx[cpu] = cx;
	omap4_cpu_poke_others(cpu);
}
/**
 * omap4_enter_idle_primary
 * @cx: target idle state
 *
 * Waits for cpu1 to be off, then starts the transition to the target power
 * state for cpu0, mpu and core power domains.
 */
static void omap4_enter_idle_primary(struct omap4_processor_cx *cx)
{
	int cpu = 0;
	int ret;
	int count = 1000000;

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	cpu_pm_enter();

	if (skip_off)
		goto out;

	/* spin until cpu1 is really off */
	while ((pwrdm_read_pwrst(cpu1_pd) != PWRDM_POWER_OFF) && count--)
		cpu_relax();

	if (pwrdm_read_pwrst(cpu1_pd) != PWRDM_POWER_OFF)
		goto wake_cpu1;

	ret = pwrdm_wait_transition(cpu1_pd);
	if (ret)
		goto wake_cpu1;

	if (!keep_mpu_on) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}

	if (!keep_core_on) {
		pwrdm_set_logic_retst(core_pd, cx->core_logic_state);
		omap_set_pwrdm_state(core_pd, cx->core_state);
	}

	pr_debug("%s: cpu0 down\n", __func__);

	omap4_enter_sleep(0, PWRDM_POWER_OFF, false);

	pr_debug("%s: cpu0 up\n", __func__);

	/* restore the MPU and CORE states to ON */
	omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	omap_set_pwrdm_state(core_pd, PWRDM_POWER_ON);

wake_cpu1:
	if (!cpu_is_offline(1)) {
		/*
		 * Work around a ROM bug that causes CPU1 to corrupt the
		 * gic distributor enable register on 4460 by disabling
		 * the gic distributor before waking CPU1, and then waiting
		 * for CPU1 to re-enable the gic distributor before
		 * continuing.
		 */
		if (!cpu_is_omap443x())
			gic_dist_disable();

		clkdm_wakeup(cpu1_cd);

		if (!cpu_is_omap443x())
			while (gic_dist_disabled())
				cpu_relax();

		/*
		 * cpu1 mucks with page tables while it is starting,
		 * prevent cpu0 executing any processes until cpu1 is up
		 */
		while (omap4_idle_requested_cx[1] && omap4_idle_ready_count)
			cpu_relax();
	}

out:
	cpu_pm_exit();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}
/**
 * omap4_enter_idle_secondary
 * @cpu: target cpu number
 *
 * Puts the target cpu powerdomain into OFF.
 */
static void omap4_enter_idle_secondary(int cpu)
{
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	cpu_pm_enter();

	pr_debug("%s: cpu1 down\n", __func__);
	flush_cache_all();
	dsb();

	/* TODO: merge CPU1 wakeup masks into CPU0 */
	omap_wakeupgen_irqmask_all(cpu, 1);
	gic_cpu_disable();

	if (!skip_off)
		omap4_enter_lowpower(cpu, PWRDM_POWER_OFF);

	omap_wakeupgen_irqmask_all(cpu, 0);
	gic_cpu_enable();

	pr_debug("%s: cpu1 up\n", __func__);

	cpu_pm_exit();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}
/**
 * omap4_enter_idle - Programs OMAP4 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Called with irqs off, returns with irqs on.
 * Returns the amount of time spent in the low power state.
 */
static int omap4_enter_idle(struct cpuidle_device *dev,
		struct cpuidle_state *state)
{
	struct omap4_processor_cx *cx = cpuidle_get_statedata(state);
	struct omap4_processor_cx *actual_cx;
	ktime_t preidle, postidle;
	bool idle = true;
	int cpu = dev->cpu;

	/*
	 * If disallow_smp_idle is set, revert to the old hotplug governor
	 * behavior
	 */
	if (dev->cpu != 0 && disallow_smp_idle)
		return omap4_enter_idle_wfi(dev, state);

	/* Clamp the power state at max_state */
	if (max_state > 0 && (cx->type > max_state - 1))
		cx = &omap4_power_states[max_state - 1];

	/*
	 * If only_state is set, use wfi if asking for a shallower idle state,
	 * or the specified state if asking for a deeper idle state
	 */
	if (only_state > 0) {
		if (cx->type < only_state - 1)
			cx = &omap4_power_states[OMAP4_STATE_C1];
		else
			cx = &omap4_power_states[only_state - 1];
	}

	if (cx->type == OMAP4_STATE_C1)
		return omap4_enter_idle_wfi(dev, state);
	preidle = ktime_get();

	local_fiq_disable();

	actual_cx = &omap4_power_states[OMAP4_STATE_C1];

	spin_lock(&omap4_idle_lock);
	omap4_cpu_update_state(cpu, cx);

	/* Wait for both cpus to be idle, exiting if an interrupt occurs */
	while (idle && !omap4_all_cpus_idle()) {
		spin_unlock(&omap4_idle_lock);
		idle = omap4_idle_wait();
		spin_lock(&omap4_idle_lock);
	}

	/*
	 * If we waited for longer than a millisecond, pop out to the governor
	 * to let it recalculate the desired state.
	 */
	if (ktime_to_us(ktime_sub(ktime_get(), preidle)) > 1000)
		idle = false;
	if (!idle) {
		omap4_cpu_update_state(cpu, NULL);
		spin_unlock(&omap4_idle_lock);
		goto out;
	}

	/*
	 * If we go to sleep with an IPI pending, we will lose it.  Once we
	 * reach this point, the other cpu is either already idle or will
	 * shortly abort idle.  If it is already idle it can't send us an IPI,
	 * so it is safe to check for pending IPIs here.  If it aborts idle
	 * we will abort as well, and any future IPIs will be processed.
	 */
	if (omap4_gic_interrupt_pending()) {
		omap4_cpu_update_state(cpu, NULL);
		spin_unlock(&omap4_idle_lock);
		goto out;
	}

	/*
	 * Both cpus are probably idle.  There is a small chance the other cpu
	 * just became active.  cpu 0 will set omap4_idle_ready_count to 1,
	 * then each other cpu will increment it.  Once a cpu has incremented
	 * the count, it cannot abort idle and must spin until either the count
	 * has hit num_online_cpus(), or is reset to 0 by an aborting cpu.
	 */
	if (cpu == 0) {
		BUG_ON(omap4_idle_ready_count != 0);
		/* cpu0 requests shared-OFF */
		omap4_idle_ready_count = 1;
		/* cpu0 can no longer abort shared-OFF, but cpu1 can */

		/* wait for cpu1 to ack shared-OFF, or leave idle */
		while (omap4_idle_ready_count != num_online_cpus() &&
		       omap4_idle_ready_count != 0 && omap4_all_cpus_idle()) {
			spin_unlock(&omap4_idle_lock);
			cpu_relax();
			spin_lock(&omap4_idle_lock);
		}

		if (omap4_idle_ready_count != num_online_cpus() ||
		    !omap4_all_cpus_idle()) {
			pr_debug("%s: cpu1 aborted: %d %p\n", __func__,
				 omap4_idle_ready_count,
				 omap4_idle_requested_cx[1]);
			omap4_idle_ready_count = 0;
			omap4_cpu_update_state(cpu, NULL);
			spin_unlock(&omap4_idle_lock);
			goto out;
		}

		actual_cx = omap4_get_idle_state();
		spin_unlock(&omap4_idle_lock);

		/* cpu1 is turning itself off, continue with turning cpu0 off */

		omap4_enter_idle_primary(actual_cx);

		spin_lock(&omap4_idle_lock);
		omap4_idle_ready_count = 0;
		omap4_cpu_update_state(cpu, NULL);
		spin_unlock(&omap4_idle_lock);
	} else {
		/* wait for cpu0 to request the shared-OFF, or leave idle */
		while ((omap4_idle_ready_count == 0) && omap4_all_cpus_idle()) {
			spin_unlock(&omap4_idle_lock);
			cpu_relax();
			spin_lock(&omap4_idle_lock);
		}

		if (!omap4_all_cpus_idle()) {
			pr_debug("%s: cpu0 aborted: %d %p\n", __func__,
				 omap4_idle_ready_count,
				 omap4_idle_requested_cx[0]);
			omap4_cpu_update_state(cpu, NULL);
			spin_unlock(&omap4_idle_lock);
			goto out;
		}

		pr_debug("%s: cpu1 acks\n", __func__);
		/* ack shared-OFF */
		if (omap4_idle_ready_count > 0)
			omap4_idle_ready_count++;
		BUG_ON(omap4_idle_ready_count > num_online_cpus());

		while (omap4_idle_ready_count != num_online_cpus() &&
		       omap4_idle_ready_count != 0) {
			spin_unlock(&omap4_idle_lock);
			cpu_relax();
			spin_lock(&omap4_idle_lock);
		}

		if (omap4_idle_ready_count == 0) {
			pr_debug("%s: cpu0 aborted: %d %p\n", __func__,
				 omap4_idle_ready_count,
				 omap4_idle_requested_cx[0]);
			omap4_cpu_update_state(cpu, NULL);
			spin_unlock(&omap4_idle_lock);
			goto out;
		}

		/* cpu1 can no longer abort shared-OFF */

		actual_cx = omap4_get_idle_state();
		spin_unlock(&omap4_idle_lock);

		omap4_enter_idle_secondary(cpu);

		spin_lock(&omap4_idle_lock);
		omap4_idle_ready_count = 0;
		omap4_cpu_update_state(cpu, NULL);
		spin_unlock(&omap4_idle_lock);

		clkdm_allow_idle(cpu1_cd);
	}
out:
	postidle = ktime_get();

	omap4_update_actual_state(dev, actual_cx);

	local_irq_enable();
	local_fiq_enable();

	return ktime_to_us(ktime_sub(postidle, preidle));
}
DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
/**
 * omap4_init_power_states - Initialises the OMAP4 specific C states.
 *
 * Below is the description of each C state:
 *	C1 : CPUx WFI + MPU ON + CORE ON
 *	C2 : CPUx OFF + MPU INA + CORE INA
 *	C3 : CPUx OFF + MPU CSWR + CORE OSWR
 *	C4 : CPUx OFF + MPU OSWR + CORE OSWR
 */
static void omap4_init_power_states(
		const struct cpuidle_params *cpuidle_params_table)
{
	/*
	 * C1 - CPUx WFI + MPU ON + CORE ON
	 */
	omap4_power_states[OMAP4_STATE_C1].valid =
		cpuidle_params_table[OMAP4_STATE_C1].valid;
	omap4_power_states[OMAP4_STATE_C1].type = OMAP4_STATE_C1;
	omap4_power_states[OMAP4_STATE_C1].exit_latency =
		cpuidle_params_table[OMAP4_STATE_C1].exit_latency;
	omap4_power_states[OMAP4_STATE_C1].target_residency =
		cpuidle_params_table[OMAP4_STATE_C1].target_residency;
	omap4_power_states[OMAP4_STATE_C1].desc = "CPU WFI";

	/*
	 * C2 - CPUx OFF + MPU INA + CORE INA
	 */
	omap4_power_states[OMAP4_STATE_C2].valid =
		cpuidle_params_table[OMAP4_STATE_C2].valid;
	omap4_power_states[OMAP4_STATE_C2].type = OMAP4_STATE_C2;
	omap4_power_states[OMAP4_STATE_C2].exit_latency =
		cpuidle_params_table[OMAP4_STATE_C2].exit_latency;
	omap4_power_states[OMAP4_STATE_C2].target_residency =
		cpuidle_params_table[OMAP4_STATE_C2].target_residency;
	omap4_power_states[OMAP4_STATE_C2].mpu_state = PWRDM_POWER_INACTIVE;
	omap4_power_states[OMAP4_STATE_C2].mpu_logic_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C2].core_state = PWRDM_POWER_INACTIVE;
	omap4_power_states[OMAP4_STATE_C2].core_logic_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C2].desc =
		"CPUs OFF, MPU INA + CORE INA";

	/*
	 * C3 - CPUx OFF + MPU CSWR + CORE OSWR
	 */
	omap4_power_states[OMAP4_STATE_C3].valid =
		cpuidle_params_table[OMAP4_STATE_C3].valid;
	omap4_power_states[OMAP4_STATE_C3].type = OMAP4_STATE_C3;
	omap4_power_states[OMAP4_STATE_C3].exit_latency =
		cpuidle_params_table[OMAP4_STATE_C3].exit_latency;
	omap4_power_states[OMAP4_STATE_C3].target_residency =
		cpuidle_params_table[OMAP4_STATE_C3].target_residency;
	omap4_power_states[OMAP4_STATE_C3].mpu_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C3].mpu_logic_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C3].core_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C3].core_logic_state = PWRDM_POWER_OFF;
	omap4_power_states[OMAP4_STATE_C3].desc =
		"CPUs OFF, MPU CSWR + CORE OSWR";

	/*
	 * C4 - CPUx OFF + MPU OSWR + CORE OSWR
	 */
	omap4_power_states[OMAP4_STATE_C4].valid =
		cpuidle_params_table[OMAP4_STATE_C4].valid;
	omap4_power_states[OMAP4_STATE_C4].type = OMAP4_STATE_C4;
	omap4_power_states[OMAP4_STATE_C4].exit_latency =
		cpuidle_params_table[OMAP4_STATE_C4].exit_latency;
	omap4_power_states[OMAP4_STATE_C4].target_residency =
		cpuidle_params_table[OMAP4_STATE_C4].target_residency;
	omap4_power_states[OMAP4_STATE_C4].mpu_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C4].mpu_logic_state = PWRDM_POWER_OFF;
	omap4_power_states[OMAP4_STATE_C4].core_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C4].core_logic_state = PWRDM_POWER_OFF;
	omap4_power_states[OMAP4_STATE_C4].desc =
		"CPUs OFF, MPU OSWR + CORE OSWR";
}
struct cpuidle_driver omap4_idle_driver = {
	.name = "omap4_idle",
	.owner = THIS_MODULE,
};
/**
 * omap4_idle_init - Init routine for OMAP4 idle
 *
 * Registers the OMAP4 specific cpuidle driver with the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
	int cpu_id = 0, i, count = 0;
	struct omap4_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev;
	const struct cpuidle_params *idle_params;

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	BUG_ON(!mpu_pd);
	cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
	BUG_ON(!cpu1_pd);
	cpu1_cd = clkdm_lookup("mpu1_clkdm");
	BUG_ON(!cpu1_cd);
	core_pd = pwrdm_lookup("core_pwrdm");
	BUG_ON(!core_pd);

	if (cpu_is_omap443x())
		idle_params = omap443x_cpuidle_params_table;
	else if (cpu_is_omap446x())
		idle_params = omap446x_cpuidle_params_table;
	else
		idle_params = omap447x_cpuidle_params_table;

	omap4_init_power_states(idle_params);

	cpuidle_register_driver(&omap4_idle_driver);

	for_each_possible_cpu(cpu_id) {
		dev = &per_cpu(omap4_idle_dev, cpu_id);
		dev->cpu = cpu_id;
		count = 0;
		for (i = OMAP4_STATE_C1; i < OMAP4_MAX_STATES; i++) {
			cx = &omap4_power_states[i];
			state = &dev->states[count];

			if (!cx->valid)
				continue;
			cpuidle_set_statedata(state, cx);
			state->exit_latency = cx->exit_latency;
			state->target_residency = cx->target_residency;
			state->flags = CPUIDLE_FLAG_TIME_VALID;
			if (cx->type == OMAP4_STATE_C1) {
				dev->safe_state = state;
				state->enter = omap4_enter_idle_wfi;
			} else {
				state->enter = omap4_enter_idle;
			}

			sprintf(state->name, "C%d", count + 1);
			strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
			count++;
		}

		if (!count)
			return -EINVAL;
		dev->state_count = count;

		if (cpuidle_register_device(dev)) {
			pr_err("%s: CPUidle register device failed\n",
			       __func__);
			return -EIO;
		}
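
		/*
		 * Route this cpu's "poke" interrupt to this cpu only;
		 * the GIC target registers hold one byte per interrupt.
		 */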
		__raw_writeb(BIT(cpu_id), omap4_get_gic_dist_base() +
			     GIC_DIST_TARGET + omap4_poke_interrupt[cpu_id]);
	}

	return 0;
}
#else
int __init omap4_idle_init(void)
{
	return 0;
}
#endif /* CONFIG_CPU_IDLE */