[cmplus.git] / arch / arm / mach-omap2 / omap-wakeupgen.c

/*
 * OMAP WakeupGen source file
 *
 * The WakeupGen unit is responsible for generating wakeup events from the
 * incoming interrupts and enable bits. The WakeupGen is implemented in the
 * MPU always-on power domain. The WakeupGen consists of two sub-units, one
 * for each CPU, and manages only SPI interrupts. The hardware requirement is
 * that the GIC and WakeupGen be kept in sync for proper operation.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

#include <asm/hardware/gic.h>

#include <mach/omap-wakeupgen.h>
#include <mach/omap4-common.h>

#include "omap4-sar-layout.h"

#define NR_BANKS		4
#define MAX_IRQS		128
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
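
/*
 * Register layout implied by the constants above: each CPU owns NR_BANKS
 * 32-bit enable banks (4 x 32 = MAX_IRQS SPI lines), and the CPU1 banks
 * live CPU_ENA_OFFSET bytes above the CPU0 banks (see wakeupgen_readl()
 * and wakeupgen_writel() below).
 */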

/* WakeupGen Base address */
static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_PER_CPU(u32 [NR_BANKS], irqmasks);
static DEFINE_SPINLOCK(wakeupgen_lock);

/*
 * Static helper functions
 */
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	__raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	__raw_writel(val, sar_base + offset + (idx * 4));
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < NR_BANKS; i++)
		wakeupgen_writel(reg, i, cpu);
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	unsigned int spi_irq;

	/*
	 * PPIs and SGIs are not supported
	 */
	if (irq < OMAP44XX_IRQ_GIC_START)
		return -EINVAL;

	/*
	 * Subtract the GIC offset
	 */
	spi_irq = irq - OMAP44XX_IRQ_GIC_START;
	if (spi_irq >= MAX_IRQS) {
		pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
		return -EINVAL;
	}

	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. 1 bit per SPI IRQ.
	 */
	*reg_index = spi_irq >> 5;
	*bit_posn = spi_irq % 32;

	return 0;
}
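
/*
 * Example: with OMAP44XX_IRQ_GIC_START at 32 (the first GIC SPI), MPU
 * IRQ 41 is SPI 9, so _wakeupgen_get_irq_info() yields reg_index 0 and
 * bit_posn 9, i.e. bit 9 of the first per-CPU enable bank.
 */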

static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < NR_BANKS; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < NR_BANKS; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	spin_lock(&wakeupgen_lock);
	_wakeupgen_clear(d->irq, d->node);
	spin_unlock(&wakeupgen_lock);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	spin_lock(&wakeupgen_lock);
	_wakeupgen_set(d->irq, d->node);
	spin_unlock(&wakeupgen_lock);
}
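
/*
 * The mask/unmask hooks above are installed into gic_arch_extn by
 * omap_wakeupgen_init() below, so the GIC core invokes them whenever an
 * SPI is masked or unmasked and the WakeupGen enable bits stay in sync
 * with the GIC, as the hardware requires.
 */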

/**
 * omap_wakeupgen_irqmask_all() - Mask or unmask interrupts
 * @cpu - CPU ID
 * @set - 1 = Save the current masks and mask all interrupts on the 'cpu'
 *	  0 = Unmask all interrupts on the 'cpu' and restore the saved masks
 *
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through the GIC registers to arrive at the correct masks.
 */
void omap_wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	spin_lock(&wakeupgen_lock);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	spin_unlock(&wakeupgen_lock);
}
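
/*
 * Illustrative pairing from a CPU low-power entry/exit sequence (the
 * actual callers live outside this file):
 *
 *	omap_wakeupgen_irqmask_all(cpu, 1);	save masks, then mask all
 *	...enter and leave the low-power state...
 *	omap_wakeupgen_irqmask_all(cpu, 0);	unmask all, restore masks
 */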

#ifdef CONFIG_PM
/*
 * Masking wakeup irqs is handled by the IRQCHIP_MASK_ON_SUSPEND flag,
 * so no action is necessary in set_wake, but implement an empty handler
 * here to prevent enable_irq_wake() returning an error.
 */
static int wakeupgen_set_wake(struct irq_data *d, unsigned int on)
{
	return 0;
}
#else
#define wakeupgen_set_wake	NULL
#endif

/*
 * Initialise the WakeupGen module
 */
int __init omap_wakeupgen_init(void)
{
	u8 i;

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K);
	if (WARN_ON(!wakeupgen_base))
		return -ENODEV;

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < NR_BANKS; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * Override the GIC architecture specific functions to add the
	 * OMAP WakeupGen interrupt controller along with the GIC
	 */
	gic_arch_extn.irq_mask = wakeupgen_mask;
	gic_arch_extn.irq_unmask = wakeupgen_unmask;
	gic_arch_extn.irq_set_wake = wakeupgen_set_wake;
	gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND;

	return 0;
}

/**
 * omap_wakeupgen_save() - WakeupGen context save function
 *
 * Save the WakeupGen context in SAR BANK3. Restore is done by the ROM code.
 * The WakeupGen IP is integrated along with the GIC to manage interrupt
 * wakeups from CPU low power states. It is located in the always-on power
 * domain and manages masking/unmasking of the Shared Peripheral Interrupts
 * (SPI), so the interrupt enable/disable control must be kept in sync and
 * consistent between the WakeupGen and the GIC so that interrupts are not
 * lost. Hence the GIC and WakeupGen contexts are saved and restored together.
 *
 * During normal operation, the WakeupGen delivers external interrupts
 * directly to the GIC. When a CPU asserts StandbyWFI, indicating that
 * it wants to enter a low power state, the Standby Controller checks
 * with the WakeupGen unit using the idlereq/idleack handshake to make
 * sure there are no incoming interrupts.
 */
void omap_wakeupgen_save(void)
{
	u8 i;
	u32 val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	if (!sar_base)
		sar_base = omap4_get_sar_ram_base();

	for (i = 0; i < NR_BANKS; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from the HLOS, so overwrite the SAR
		 * location so that the secure interrupts remain disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}
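
	/*
	 * Note: the AuxCoreBoot0/1 registers saved below are also used for
	 * secondary-CPU boot (release flag and jump address checked by the
	 * ROM code), so preserving them across the low-power state matters.
	 */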

	/* Save AuxBoot* registers */
	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	__raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	__raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	__raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	__raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	__raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}