/*
 * Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM)
 *
 * Based on the work of Rickard Andersson <rickard.andersson@stericsson.com>
 * and Jonas Aaberg <jonas.aberg@stericsson.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/smp.h>
#include <linux/mfd/dbx500-prcmu.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>

static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device);
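
/*
 * Idle entry point shared by both cpus: the last cpu to get here becomes
 * the 'master' and, with the gic decoupled, asks the prcmu to enter the
 * ApIdle retention state, while the other cpu simply executes WFI.
 */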
static inline int ux500_enter_idle(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	int this_cpu = smp_processor_id();
	bool recouple = false;

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu);

	if (atomic_inc_return(&master) == num_online_cpus()) {

		/* With this lock, we prevent the other cpu from exiting and
		 * entering this function again and becoming the master */
		if (!spin_trylock(&master_lock))
			goto wfi;

		/* decouple the gic from the A9 cores */
		if (prcmu_gic_decouple()) {
			spin_unlock(&master_lock);
			goto out;
		}

		/* If an error occurs, we will have to recouple the gic
		 * manually */
		recouple = true;

		/* At this state, as the gic is decoupled, if the other
		 * cpu is in WFI, we have the guarantee it won't be woken
		 * up, so we can safely go to retention */
		if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
			goto out;

		/* The prcmu will be in charge of watching the interrupts
		 * and waking up the cpus */
		if (prcmu_copy_gic_settings())
			goto out;

		/* Check that in the meantime an interrupt did
		 * not occur on the gic ... */
		if (prcmu_gic_pending_irq())
			goto out;

		/* ... and the prcmu */
		if (prcmu_pending_irq())
			goto out;

		/* Go to the retention state, the prcmu will wait for the
		 * cpu to go WFI and this is what happens after exiting this
		 * 'master' critical section */
		if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true))
			goto out;

		/* When we switch to retention, the prcmu is in charge
		 * of recoupling the gic automatically */
		recouple = false;

		spin_unlock(&master_lock);
	}

wfi:
	cpu_do_idle();
out:
	atomic_dec(&master);

	if (recouple) {
		prcmu_gic_recouple();
		spin_unlock(&master_lock);
	}

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu);

	return index;
}
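
/*
 * Two states are exposed: the standard ARM WFI state and the ApIdle
 * retention state driven by ux500_enter_idle() above.
 */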
static struct cpuidle_driver ux500_idle_driver = {
	.name = "ux500_idle",
	.owner = THIS_MODULE,
	.en_core_tk_irqen = 1,
	.states = {
		ARM_CPUIDLE_WFI_STATE,
		{
			.enter		  = ux500_enter_idle,
			.exit_latency	  = 70,
			.target_residency = 260,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "ApIdle",
			.desc		  = "ARM Retention",
		},
	},
	.safe_state_index = 0,
	.state_count = 2,
};

/*
 * For each cpu, setup the broadcast timer because we will
 * need to migrate the timers for the states >= ApIdle.
 */
static void ux500_setup_broadcast_timer(void *arg)
{
	int cpu = smp_processor_id();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}
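
/*
 * Register the driver, then one cpuidle device per online cpu.
 */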
int __init ux500_idle_init(void)
{
	int ret, cpu;
	struct cpuidle_device *device;

	/* Configure wake up reasons */
	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
			     PRCMU_WAKEUP(ABB));

	/*
	 * Configure the timer broadcast for each cpu, which must
	 * be done from the cpu context, so we use an smp cross
	 * call with 'on_each_cpu'.
	 */
	on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);

	ret = cpuidle_register_driver(&ux500_idle_driver);
	if (ret) {
		printk(KERN_ERR "failed to register ux500 idle driver\n");
		return ret;
	}

	for_each_online_cpu(cpu) {
		device = &per_cpu(ux500_cpuidle_device, cpu);
		device->cpu = cpu;
		ret = cpuidle_register_device(device);
		if (ret) {
			printk(KERN_ERR "Failed to register cpuidle "
			       "device for cpu%d\n", cpu);
			goto out_unregister;
		}
	}
out:
	return ret;

out_unregister:
	for_each_online_cpu(cpu) {
		device = &per_cpu(ux500_cpuidle_device, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(&ux500_idle_driver);
	goto out;
}

device_initcall(ux500_idle_init);