/*
 * OMAP4 CPU idle Routines
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/cpu_pm.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/hardware/gic.h>

#include <mach/omap4-common.h>
#include <mach/omap-wakeupgen.h>

#include <plat/gpio.h>

#include "clockdomain.h"
#include "powerdomain.h"	/* pwrdm_* helpers used below */
#include "pm.h"			/* struct cpuidle_params, omap_set_pwrdm_state() */
#ifdef CONFIG_CPU_IDLE

#ifdef CONFIG_OMAP_ALLOW_OSWR
#define CPU_IDLE_ALLOW_OSWR	1
#else
#define CPU_IDLE_ALLOW_OSWR	0
#endif
/* C1 is a single-cpu C-state, it can be entered by each cpu independently */
/* C1 - CPUx WFI + MPU ON + CORE ON */
#define OMAP4_STATE_C1		0
/* C2 through C4 are shared C-states, both CPUs must agree to enter */
/* C2 - CPUx OFF + MPU INA + CORE INA */
#define OMAP4_STATE_C2		1
/* C3 - CPUx OFF + MPU CSWR + CORE OSWR */
#define OMAP4_STATE_C3		2
/* C4 - CPUx OFF + MPU OSWR + CORE OSWR */
#define OMAP4_STATE_C4		3

#define OMAP4_MAX_STATES	4
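
/*
 * The rendezvous that lets both cpus agree on a shared C-state is
 * implemented in omap4_enter_idle() below; C1 bypasses it entirely and
 * is entered per-cpu via omap4_enter_idle_wfi().
 */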
static bool disallow_smp_idle;
module_param(disallow_smp_idle, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disallow_smp_idle,
	"Don't enter idle if multiple cpus are active");

static bool skip_off;
module_param(skip_off, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(skip_off,
	"Do everything except actually enter the low power state (debugging)");

static bool keep_core_on;
module_param(keep_core_on, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(keep_core_on,
	"Prevent core powerdomain from entering any low power states (debugging)");

static bool keep_mpu_on;
module_param(keep_mpu_on, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(keep_mpu_on,
	"Prevent mpu powerdomain from entering any low power states (debugging)");

static int max_state;
module_param(max_state, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_state,
	"Select deepest power state allowed (0=any, 1=WFI, 2=INA, 3=CSWR, 4=OSWR)");

static int only_state;
module_param(only_state, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(only_state,
	"Select only power state allowed (0=any, 1=WFI, 2=INA, 3=CSWR, 4=OSWR)");
static const int omap4_poke_interrupt[2] = {
	OMAP44XX_IRQ_CPUIDLE_POKE0,
	OMAP44XX_IRQ_CPUIDLE_POKE1
};
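
/*
 * One otherwise-unused GIC interrupt is reserved per cpu; it is never
 * handled, only raised and cleared by omap4_poke_cpu()/omap4_idle_wait()
 * below to emulate a WFE/SEV-style event between the two cpus.
 */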
struct omap4_processor_cx {
	u8 valid;
	u8 type;
	u32 exit_latency;
	u32 target_residency;
	u32 mpu_state;
	u32 mpu_logic_state;
	u32 core_state;
	u32 core_logic_state;
	const char *desc;
};
static struct omap4_processor_cx omap4_power_states[OMAP4_MAX_STATES];
static struct powerdomain *mpu_pd, *cpu1_pd, *core_pd;
static struct omap4_processor_cx *omap4_idle_requested_cx[NR_CPUS];
static int omap4_idle_ready_count;
static DEFINE_SPINLOCK(omap4_idle_lock);
static struct clockdomain *cpu1_cd;
/*
 * Raw measured exit latency numbers (us):
 */
static __initdata struct cpuidle_params omap443x_cpuidle_params_table[] = {
	/* C1 - CPUx WFI + MPU ON + CORE ON */
	{
		.target_residency = 4,
		.valid = 1,
	},
	/* C2 - CPUx OFF + MPU INA + CORE INA */
	{
		.target_residency = 300,
		.valid = 1,
	},
	/* C3 - CPUx OFF + MPU CSWR + CORE OSWR */
	{
		.exit_latency = 5000,
		.target_residency = 10000,
		.valid = 1,
	},
	/* C4 - CPUx OFF + MPU OSWR + CORE OSWR */
	{
		.exit_latency = 5200,
		.target_residency = 35000,
		.valid = CPU_IDLE_ALLOW_OSWR,
	},
};
static __initdata struct cpuidle_params omap446x_cpuidle_params_table[] = {
	/* C1 - CPUx WFI + MPU ON + CORE ON */
	{
		.target_residency = 4,
		.valid = 1,
	},
	/* C2 - CPUx OFF + MPU INA + CORE INA */
	{
		.target_residency = 1800,
		.valid = 1,
	},
	/* C3 - CPUx OFF + MPU CSWR + CORE OSWR */
	{
		.exit_latency = 1200,
		.target_residency = 4000,
		.valid = 1,
	},
	/* C4 - CPUx OFF + MPU OSWR + CORE OSWR */
	{
		.exit_latency = 1400,
		.target_residency = 4200,
		.valid = CPU_IDLE_ALLOW_OSWR,
	},
};
static __initdata struct cpuidle_params omap447x_cpuidle_params_table[] = {
	/* C1 - CPUx WFI + MPU ON + CORE ON */
	{
		.target_residency = 4,
		.valid = 1,
	},
	/* C2 - CPUx OFF + MPU INA + CORE INA */
	{
		.target_residency = 1200,
		.valid = 1,
	},
	/* C3 - CPUx OFF + MPU CSWR + CORE OSWR */
	{
		.exit_latency = 5300,
		.target_residency = 5300,
		.valid = 1,
	},
	/* C4 - CPUx OFF + MPU OSWR + CORE OSWR */
	{
		.exit_latency = 5500,
		.target_residency = 15000,
		.valid = CPU_IDLE_ALLOW_OSWR,
	},
};
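
/*
 * omap4_idle_init() below picks one of the three tables based on
 * cpu_is_omap44{3,6,7}x(); exit_latency and target_residency are what the
 * cpuidle governor uses to decide whether a state is worth entering.
 */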
/*
 * omap4_update_actual_state - record the state that was actually entered
 *
 * Points dev->last_state at the cpuidle state matching cx so the governor
 * accounts the residency against the state that was really used, which may
 * be shallower than the one it requested.
 */
static void omap4_update_actual_state(struct cpuidle_device *dev,
		struct omap4_processor_cx *cx)
{
	int i;

	for (i = 0; i < dev->state_count; i++) {
		if (dev->states[i].driver_data == cx) {
			dev->last_state = &dev->states[i];
			return;
		}
	}
}
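
/*
 * Reads the GIC cpu interface's highest-priority-pending register; it
 * returns the spurious interrupt ID 0x3FF when nothing is pending.
 */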
static bool omap4_gic_interrupt_pending(void)
{
	void __iomem *gic_cpu = omap4_get_gic_cpu_base();

	return (__raw_readl(gic_cpu + GIC_CPU_HIGHPRI) != 0x3FF);
}
/*
 * omap4_wfi_until_interrupt
 *
 * wfi can sometimes return with no interrupts pending, for example on a
 * broadcast cache flush or tlb op. This function will call wfi repeatedly
 * until an interrupt is actually pending. Returning without looping would
 * cause very short idle times to be reported to the idle governor, messing
 * with repeating interrupt detection, and causing deep idle states to be
 * avoided.
 */
static void omap4_wfi_until_interrupt(void)
{
retry:
	omap_do_wfi();

	if (!omap4_gic_interrupt_pending())
		goto retry;
}
/*
 * omap4_idle_wait
 *
 * Similar to WFE, but can be woken by an interrupt even though interrupts
 * are masked. An "event" is emulated by a per-cpu unused interrupt in the
 * GIC. Returns false if the wake was caused by an interrupt, true if by
 * an "event".
 */
static bool omap4_idle_wait(void)
{
	int cpu = hard_smp_processor_id();
	void __iomem *gic_dist = omap4_get_gic_dist_base();
	/* the GIC banks enable/pending bits 32 per 32-bit register */
	u32 bit = BIT(omap4_poke_interrupt[cpu] % 32);
	u32 reg = (omap4_poke_interrupt[cpu] / 32) * 4;
	u32 poked;

	/* Unmask the "event" interrupt */
	__raw_writel(bit, gic_dist + GIC_DIST_ENABLE_SET + reg);

	omap4_wfi_until_interrupt();

	/* Read the "event" interrupt pending bit */
	poked = __raw_readl(gic_dist + GIC_DIST_PENDING_SET + reg) & bit;

	/* Mask the "event" */
	__raw_writel(bit, gic_dist + GIC_DIST_ENABLE_CLEAR + reg);

	/* Clear the event */
	if (poked)
		__raw_writel(bit, gic_dist + GIC_DIST_PENDING_CLEAR + reg);

	return poked;
}
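
/*
 * A cpu parks in omap4_idle_wait() with the idle lock dropped and is
 * released either by a real interrupt (returns false, the idle attempt is
 * aborted) or by another cpu poking it after updating its requested
 * C-state (returns true, the caller re-checks the shared state).
 */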
/*
 * omap4_poke_cpu
 *
 * trigger an "event" to wake a cpu from omap4_idle_wait.
 */
static void omap4_poke_cpu(int cpu)
{
	void __iomem *gic_dist = omap4_get_gic_dist_base();
	u32 bit = BIT(omap4_poke_interrupt[cpu] % 32);
	u32 reg = (omap4_poke_interrupt[cpu] / 32) * 4;

	__raw_writel(bit, gic_dist + GIC_DIST_PENDING_SET + reg);
}
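
/*
 * Setting the pending bit is enough: the target cpu unmasked its poke
 * interrupt before parking, so the write terminates its wfi even though
 * that cpu has irqs masked and never actually handles the interrupt.
 */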
/**
 * omap4_enter_idle_wfi - cpuidle C1 entry function
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Idle function for C1 state, WFI on a single CPU.
 * Called with irqs off, returns with irqs on.
 * Returns the amount of time spent in the low power state.
 */
static int omap4_enter_idle_wfi(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	ktime_t preidle, postidle;

	preidle = ktime_get();

	omap4_wfi_until_interrupt();

	postidle = ktime_get();

	local_irq_enable();

	omap4_update_actual_state(dev, &omap4_power_states[OMAP4_STATE_C1]);

	return ktime_to_us(ktime_sub(postidle, preidle));
}
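
/*
 * C1 doubles as dev->safe_state (set up in omap4_idle_init below): it is
 * the fallback whenever a deeper shared state is disallowed or aborted.
 */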
/* Returns true if every online cpu has requested some idle C-state */
static inline bool omap4_all_cpus_idle(void)
{
	int i;

	assert_spin_locked(&omap4_idle_lock);

	for_each_online_cpu(i)
		if (omap4_idle_requested_cx[i] == NULL)
			return false;

	return true;
}
/* Returns the shallowest C-state requested by any online cpu */
static inline struct omap4_processor_cx *omap4_get_idle_state(void)
{
	struct omap4_processor_cx *cx = NULL;
	int i;

	assert_spin_locked(&omap4_idle_lock);

	for_each_online_cpu(i)
		if (!cx || omap4_idle_requested_cx[i]->type < cx->type)
			cx = omap4_idle_requested_cx[i];

	return cx;
}
/* Wakes every other online cpu out of omap4_idle_wait() */
static void omap4_cpu_poke_others(int cpu)
{
	int i;

	for_each_online_cpu(i)
		if (i != cpu)
			omap4_poke_cpu(i);
}
static void omap4_cpu_update_state(int cpu, struct omap4_processor_cx *cx)
{
	assert_spin_locked(&omap4_idle_lock);

	omap4_idle_requested_cx[cpu] = cx;
	omap4_cpu_poke_others(cpu);
}
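
/*
 * Protocol invariant: omap4_idle_requested_cx[cpu] is non-NULL exactly
 * while that cpu is inside an idle attempt. Every update pokes the other
 * cpus so their wait loops re-evaluate the shared state immediately.
 */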
/*
 * omap4_enter_idle_primary
 * @cx: target idle state
 *
 * Waits for cpu1 to be off, then starts the transition to the target power
 * state for cpu0, mpu and core power domains.
 */
static void omap4_enter_idle_primary(struct omap4_processor_cx *cx)
{
	int cpu = 0;
	int ret;
	int count = 1000000;	/* bound on the cpu1 power-state spin (limit assumed) */

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	/* spin until cpu1 is really off */
	while ((pwrdm_read_pwrst(cpu1_pd) != PWRDM_POWER_OFF) && count--)
		cpu_relax();

	if (pwrdm_read_pwrst(cpu1_pd) != PWRDM_POWER_OFF)
		goto wake_cpu1;

	ret = pwrdm_wait_transition(cpu1_pd);
	if (ret)
		goto wake_cpu1;

	if (!keep_mpu_on) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}

	if (!keep_core_on) {
		pwrdm_set_logic_retst(core_pd, cx->core_logic_state);
		omap_set_pwrdm_state(core_pd, cx->core_state);
	}

	pr_debug("%s: cpu0 down\n", __func__);

	if (!skip_off)
		omap4_enter_sleep(0, PWRDM_POWER_OFF, false);

	pr_debug("%s: cpu0 up\n", __func__);

	/* restore the MPU and CORE states to ON */
	omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	omap_set_pwrdm_state(core_pd, PWRDM_POWER_ON);

wake_cpu1:
	if (!cpu_is_offline(1)) {
		/*
		 * Work around a ROM bug that causes CPU1 to corrupt the
		 * gic distributor enable register on 4460 by disabling
		 * the gic distributor before waking CPU1, and then waiting
		 * for CPU1 to re-enable the gic distributor before continuing.
		 */
		if (!cpu_is_omap443x())
			gic_dist_disable();

		clkdm_wakeup(cpu1_cd);

		if (!cpu_is_omap443x())
			while (gic_dist_disabled())
				cpu_relax();

		/*
		 * cpu1 mucks with page tables while it is starting,
		 * prevent cpu0 executing any processes until cpu1 is up
		 */
		while (omap4_idle_requested_cx[1] && omap4_idle_ready_count)
			cpu_relax();
	}

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}
/*
 * omap4_enter_idle_secondary
 * @cpu: target cpu number
 *
 * Puts target cpu powerdomain into OFF.
 */
static void omap4_enter_idle_secondary(int cpu)
{
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	pr_debug("%s: cpu1 down\n", __func__);

	/* TODO: merge CPU1 wakeup masks into CPU0 */
	omap_wakeupgen_irqmask_all(cpu, 1);

	omap4_enter_lowpower(cpu, PWRDM_POWER_OFF);

	omap_wakeupgen_irqmask_all(cpu, 0);

	pr_debug("%s: cpu1 up\n", __func__);

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}
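
/*
 * Shared-OFF handshake: each cpu publishes a requested C-state and parks
 * in omap4_idle_wait(). Once every online cpu has published, cpu0 sets
 * omap4_idle_ready_count to 1 and each other cpu increments it. A cpu that
 * has incremented the count can no longer abort; a cpu that aborts earlier
 * resets the count to 0, unwinding everyone. When the count reaches
 * num_online_cpus(), cpu1 powers itself off and cpu0 drives the mpu and
 * core powerdomains to the agreed state.
 */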
/**
 * omap4_enter_idle - Programs OMAP4 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Called with irqs off, returns with irqs on.
 * Returns the amount of time spent in the low power state.
 */
static int omap4_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap4_processor_cx *cx = cpuidle_get_statedata(state);
	struct omap4_processor_cx *actual_cx;
	ktime_t preidle, postidle;
	bool idle = true;
	int cpu = dev->cpu;

	/*
	 * If disallow_smp_idle is set, revert to the old hotplug governor
	 * behavior: secondary cpus only ever use WFI, so deep states are
	 * reached only once they have been taken offline.
	 */
	if (dev->cpu != 0 && disallow_smp_idle)
		return omap4_enter_idle_wfi(dev, state);

	/* Clamp the power state at max_state */
	if (max_state > 0 && (cx->type > max_state - 1))
		cx = &omap4_power_states[max_state - 1];

	/*
	 * If only_state is set, use wfi if asking for a shallower idle state,
	 * or the specified state if asking for a deeper idle state
	 */
	if (only_state > 0) {
		if (cx->type < only_state - 1)
			cx = &omap4_power_states[OMAP4_STATE_C1];
		else
			cx = &omap4_power_states[only_state - 1];
	}

	if (cx->type == OMAP4_STATE_C1)
		return omap4_enter_idle_wfi(dev, state);

	preidle = ktime_get();

	actual_cx = &omap4_power_states[OMAP4_STATE_C1];

	spin_lock(&omap4_idle_lock);
	omap4_cpu_update_state(cpu, cx);

	/* Wait for both cpus to be idle, exiting if an interrupt occurs */
	while (idle && !omap4_all_cpus_idle()) {
		spin_unlock(&omap4_idle_lock);
		idle = omap4_idle_wait();
		spin_lock(&omap4_idle_lock);
	}

	/*
	 * If we waited for longer than a millisecond, pop out to the governor
	 * to let it recalculate the desired state.
	 */
	if (ktime_to_us(ktime_sub(ktime_get(), preidle)) > 1000)
		idle = false;

	if (!idle) {
		omap4_cpu_update_state(cpu, NULL);
		spin_unlock(&omap4_idle_lock);
		goto out;
	}

	/*
	 * If we go to sleep with an IPI pending, we will lose it. Once we
	 * reach this point, the other cpu is either already idle or will
	 * shortly abort idle. If it is already idle it can't send us an IPI,
	 * so it is safe to check for pending IPIs here. If it aborts idle
	 * we will abort as well, and any future IPIs will be processed.
	 */
	if (omap4_gic_interrupt_pending()) {
		omap4_cpu_update_state(cpu, NULL);
		spin_unlock(&omap4_idle_lock);
		goto out;
	}

	/*
	 * Both cpus are probably idle. There is a small chance the other cpu
	 * just became active. cpu 0 will set omap4_idle_ready_count to 1,
	 * then each other cpu will increment it. Once a cpu has incremented
	 * the count, it cannot abort idle and must spin until either the count
	 * has hit num_online_cpus(), or is reset to 0 by an aborting cpu.
	 */
	if (cpu == 0) {
		BUG_ON(omap4_idle_ready_count != 0);
		/* cpu0 requests shared-OFF */
		omap4_idle_ready_count = 1;
		/* cpu0 can no longer abort shared-OFF, but cpu1 can */

		/* wait for cpu1 to ack shared-OFF, or leave idle */
		while (omap4_idle_ready_count != num_online_cpus() &&
		       omap4_idle_ready_count != 0 && omap4_all_cpus_idle()) {
			spin_unlock(&omap4_idle_lock);
			cpu_relax();
			spin_lock(&omap4_idle_lock);
		}

		if (omap4_idle_ready_count != num_online_cpus() ||
		    !omap4_all_cpus_idle()) {
			pr_debug("%s: cpu1 aborted: %d %p\n", __func__,
					omap4_idle_ready_count,
					omap4_idle_requested_cx[1]);
			omap4_idle_ready_count = 0;
			omap4_cpu_update_state(cpu, NULL);
			spin_unlock(&omap4_idle_lock);
			goto out;
		}

		actual_cx = omap4_get_idle_state();
		spin_unlock(&omap4_idle_lock);

		/* cpu1 is turning itself off, continue with turning cpu0 off */
		omap4_enter_idle_primary(actual_cx);

		spin_lock(&omap4_idle_lock);
		omap4_idle_ready_count = 0;
		omap4_cpu_update_state(cpu, NULL);
		spin_unlock(&omap4_idle_lock);
	} else {
		/* wait for cpu0 to request the shared-OFF, or leave idle */
		while ((omap4_idle_ready_count == 0) && omap4_all_cpus_idle()) {
			spin_unlock(&omap4_idle_lock);
			cpu_relax();
			spin_lock(&omap4_idle_lock);
		}

		if (!omap4_all_cpus_idle()) {
			pr_debug("%s: cpu0 aborted: %d %p\n", __func__,
					omap4_idle_ready_count,
					omap4_idle_requested_cx[0]);
			omap4_cpu_update_state(cpu, NULL);
			spin_unlock(&omap4_idle_lock);
			goto out;
		}

		pr_debug("%s: cpu1 acks\n", __func__);
		/* ack the shared-OFF request */
		if (omap4_idle_ready_count > 0)
			omap4_idle_ready_count++;
		BUG_ON(omap4_idle_ready_count > num_online_cpus());

		while (omap4_idle_ready_count != num_online_cpus() &&
		       omap4_idle_ready_count != 0) {
			spin_unlock(&omap4_idle_lock);
			cpu_relax();
			spin_lock(&omap4_idle_lock);
		}

		if (omap4_idle_ready_count == 0) {
			pr_debug("%s: cpu0 aborted: %d %p\n", __func__,
					omap4_idle_ready_count,
					omap4_idle_requested_cx[0]);
			omap4_cpu_update_state(cpu, NULL);
			spin_unlock(&omap4_idle_lock);
			goto out;
		}

		/* cpu1 can no longer abort shared-OFF */

		actual_cx = omap4_get_idle_state();
		spin_unlock(&omap4_idle_lock);

		omap4_enter_idle_secondary(cpu);

		spin_lock(&omap4_idle_lock);
		omap4_idle_ready_count = 0;
		omap4_cpu_update_state(cpu, NULL);
		spin_unlock(&omap4_idle_lock);

		clkdm_allow_idle(cpu1_cd);
	}

out:
	postidle = ktime_get();

	omap4_update_actual_state(dev, actual_cx);

	local_irq_enable();

	return ktime_to_us(ktime_sub(postidle, preidle));
}
DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
/**
 * omap4_init_power_states - Initialises the OMAP4 specific C states.
 *
 * Below is the description of each C state.
 *	C1 - CPUx WFI + MPU ON + CORE ON
 *	C2 - CPUx OFF + MPU INA + CORE INA
 *	C3 - CPUx OFF + MPU CSWR + CORE OSWR
 *	C4 - CPUx OFF + MPU OSWR + CORE OSWR
 */
static void omap4_init_power_states(
		const struct cpuidle_params *cpuidle_params_table)
{
	/*
	 * C1 - CPUx WFI + MPU ON + CORE ON
	 */
	omap4_power_states[OMAP4_STATE_C1].valid =
		cpuidle_params_table[OMAP4_STATE_C1].valid;
	omap4_power_states[OMAP4_STATE_C1].type = OMAP4_STATE_C1;
	omap4_power_states[OMAP4_STATE_C1].exit_latency =
		cpuidle_params_table[OMAP4_STATE_C1].exit_latency;
	omap4_power_states[OMAP4_STATE_C1].target_residency =
		cpuidle_params_table[OMAP4_STATE_C1].target_residency;
	omap4_power_states[OMAP4_STATE_C1].desc = "CPU WFI";
	/*
	 * C2 - CPUx OFF + MPU INA + CORE INA
	 */
	omap4_power_states[OMAP4_STATE_C2].valid =
		cpuidle_params_table[OMAP4_STATE_C2].valid;
	omap4_power_states[OMAP4_STATE_C2].type = OMAP4_STATE_C2;
	omap4_power_states[OMAP4_STATE_C2].exit_latency =
		cpuidle_params_table[OMAP4_STATE_C2].exit_latency;
	omap4_power_states[OMAP4_STATE_C2].target_residency =
		cpuidle_params_table[OMAP4_STATE_C2].target_residency;
	omap4_power_states[OMAP4_STATE_C2].mpu_state = PWRDM_POWER_INACTIVE;
	omap4_power_states[OMAP4_STATE_C2].mpu_logic_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C2].core_state = PWRDM_POWER_INACTIVE;
	omap4_power_states[OMAP4_STATE_C2].core_logic_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C2].desc =
		"CPUs OFF, MPU INA + CORE INA";
	/*
	 * C3 - CPUx OFF + MPU CSWR + CORE OSWR
	 */
	omap4_power_states[OMAP4_STATE_C3].valid =
		cpuidle_params_table[OMAP4_STATE_C3].valid;
	omap4_power_states[OMAP4_STATE_C3].type = OMAP4_STATE_C3;
	omap4_power_states[OMAP4_STATE_C3].exit_latency =
		cpuidle_params_table[OMAP4_STATE_C3].exit_latency;
	omap4_power_states[OMAP4_STATE_C3].target_residency =
		cpuidle_params_table[OMAP4_STATE_C3].target_residency;
	omap4_power_states[OMAP4_STATE_C3].mpu_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C3].mpu_logic_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C3].core_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C3].core_logic_state = PWRDM_POWER_OFF;
	omap4_power_states[OMAP4_STATE_C3].desc =
		"CPUs OFF, MPU CSWR + CORE OSWR";
	/*
	 * C4 - CPUx OFF + MPU OSWR + CORE OSWR
	 */
	omap4_power_states[OMAP4_STATE_C4].valid =
		cpuidle_params_table[OMAP4_STATE_C4].valid;
	omap4_power_states[OMAP4_STATE_C4].type = OMAP4_STATE_C4;
	omap4_power_states[OMAP4_STATE_C4].exit_latency =
		cpuidle_params_table[OMAP4_STATE_C4].exit_latency;
	omap4_power_states[OMAP4_STATE_C4].target_residency =
		cpuidle_params_table[OMAP4_STATE_C4].target_residency;
	omap4_power_states[OMAP4_STATE_C4].mpu_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C4].mpu_logic_state = PWRDM_POWER_OFF;
	omap4_power_states[OMAP4_STATE_C4].core_state = PWRDM_POWER_RET;
	omap4_power_states[OMAP4_STATE_C4].core_logic_state = PWRDM_POWER_OFF;
	omap4_power_states[OMAP4_STATE_C4].desc =
		"CPUs OFF, MPU OSWR + CORE OSWR";
}
struct cpuidle_driver omap4_idle_driver = {
	.name = "omap4_idle",
	.owner = THIS_MODULE,
};
/**
 * omap4_idle_init - Init routine for OMAP4 idle
 *
 * Registers the OMAP4 specific cpuidle driver with the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
	int cpu_id = 0, i, count = 0;
	struct omap4_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev;
	const struct cpuidle_params *idle_params;
	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
	cpu1_cd = clkdm_lookup("mpu1_clkdm");
	core_pd = pwrdm_lookup("core_pwrdm");

	if (cpu_is_omap443x())
		idle_params = omap443x_cpuidle_params_table;
	else if (cpu_is_omap446x())
		idle_params = omap446x_cpuidle_params_table;
	else
		idle_params = omap447x_cpuidle_params_table;

	omap4_init_power_states(idle_params);

	cpuidle_register_driver(&omap4_idle_driver);
	for_each_possible_cpu(cpu_id) {
		dev = &per_cpu(omap4_idle_dev, cpu_id);
		dev->cpu = cpu_id;
		count = 0;

		for (i = OMAP4_STATE_C1; i < OMAP4_MAX_STATES; i++) {
			cx = &omap4_power_states[i];
			state = &dev->states[count];

			/* Skip states marked invalid (e.g. OSWR disallowed) */
			if (!cx->valid)
				continue;

			cpuidle_set_statedata(state, cx);
			state->exit_latency = cx->exit_latency;
			state->target_residency = cx->target_residency;
			state->flags = CPUIDLE_FLAG_TIME_VALID;
			if (cx->type == OMAP4_STATE_C1) {
				dev->safe_state = state;
				state->enter = omap4_enter_idle_wfi;
			} else {
				state->enter = omap4_enter_idle;
			}
			sprintf(state->name, "C%d", count + 1);
			strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
			count++;
		}

		dev->state_count = count;

		if (cpuidle_register_device(dev)) {
			pr_err("%s: CPUidle register device failed\n",
			       __func__);
			return -EIO;
		}

		/* route this cpu's poke interrupt to itself only */
		__raw_writeb(BIT(cpu_id), omap4_get_gic_dist_base() +
			GIC_DIST_TARGET + omap4_poke_interrupt[cpu_id]);
	}

	return 0;
}
#else
int __init omap4_idle_init(void)
{
	return 0;
}
#endif /* CONFIG_CPU_IDLE */