/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with ARM GIC to wake the CPU out from low power states on
 * external interrupts. It is responsible for generating wakeup
 * events from the incoming interrupts and enable bits. It is
 * implemented in the MPU always ON power domain. During normal operation,
 * WakeupGen delivers external interrupts directly to the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"
#include "pm.h"

#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
static unsigned int omap_secure_apis;

/*
 * Static helper functions.
 */
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	writel_relaxed(val, sar_base + offset + (idx * 4));
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. 1 bit per SPI IRQ.
	 */
	*reg_index = irq >> 5;
	*bit_posn = irq %= 32;

	return 0;
}
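
/*
 * Clear or set the WakeupGen enable bit for one hardware IRQ on one CPU.
 * The bank/bit split follows from the layout above: for example, IRQ 37
 * lives in bank 1 (37 >> 5), bit 5 (37 % 32). Callers in this file take
 * wakeupgen_lock around these read-modify-write sequences.
 */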
static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_mask_parent(d);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_unmask_parent(d);
}

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
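
/*
 * Snapshot and restore of the per-CPU WakeupGen enable masks, used around
 * CPU hotplug so that a CPU comes back online with the same wakeup sources
 * it had before going down.
 */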
static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on a given CPU.
 * set = 1: save the current masks, then mask all interrupts on the 'cpu'.
 * set = 0: unmask all interrupts on the 'cpu', then restore the saved masks.
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

#ifdef CONFIG_CPU_PM
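/*
 * Save the per-CPU WakeupGen enable masks plus the AuxCoreBoot and PTM
 * SyncReq state to OMAP4 SAR RAM; ROM code restores them on wakeup from
 * low power states.
 */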
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from HLOS. So overwrite the SAR location
		 * so that the secure interrupt remains disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}
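
/* OMAP5 variant of the above; only the SAR RAM layout offsets differ. */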
static inline void omap5_irq_save_context(void)
{
	u32 i, val;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 159 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

/*
 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
 * ROM code. WakeupGen IP is integrated along with GIC to manage the
 * interrupt wakeups from CPU low power states. It manages
 * masking/unmasking of Shared Peripheral Interrupts (SPI). So the
 * interrupt enable/disable control should be in sync and consistent
 * at WakeupGen and GIC so that interrupts are not lost.
 */
static void irq_save_context(void)
{
	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (!sar_base)
		sar_base = omap4_get_sar_ram_base();

	if (soc_is_omap54xx())
		omap5_irq_save_context();
	else
		omap4_irq_save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = readl_relaxed(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + offset);
}

/*
 * Save GIC and Wakeupgen interrupt context using secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				     FLAG_START_CRITICAL,
				     0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
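/*
 * CPU hotplug notifier: restore a CPU's saved WakeupGen masks when it
 * comes online and mask all of its interrupts when it goes down.
 */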
static int irq_cpu_hotplug_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)hcpu;

	/*
	 * Corresponding FROZEN transitions do not have to be handled,
	 * they are handled at a higher level
	 * (drivers/cpuidle/coupled.c).
	 */
	switch (action) {
	case CPU_ONLINE:
		wakeupgen_irqmask_all(cpu, 0);
		break;
	case CPU_DEAD:
		wakeupgen_irqmask_all(cpu, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_hotplug_notifier = {
	.notifier_call = irq_cpu_hotplug_notify,
};

static void __init irq_hotplug_init(void)
{
	register_hotcpu_notifier(&irq_hotplug_notifier);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_sar_clear();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}
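
/*
 * The WUGEN irq_chip only overrides mask/unmask so that the WakeupGen
 * enable bits track the GIC enable state; everything else is forwarded
 * to the parent (GIC) chip.
 */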
static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};
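
/*
 * Hierarchical irqdomain callbacks: this domain stacks on top of the GIC
 * domain and reuses the three-cell GIC binding, but only for SPIs.
 */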
static int wakeupgen_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}
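
/*
 * Allocate virqs in this domain and forward the fwspec, retargeted at the
 * parent's fwnode, to the GIC domain so both levels describe the same IRQ.
 */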
static int wakeupgen_domain_alloc(struct irq_domain *domain,
				  unsigned int virq,
				  unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &wakeupgen_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

/*
 * Initialise the wakeupgen module.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at wakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enable OMAP5 ES2 PM Mode using ES2_PM_MODE in AMBA_IF_MODE:
	 * 0x0: ES1 behavior, CPU cores would enter and exit OFF mode together.
	 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 *      independently.
	 * This only needs to be set once, since the WakeupGen sits in the
	 * always ON power domain.
	 *
	 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
	 * ES2.0, and the same is applicable for DRA7.
	 */
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();

	return 0;
}
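
/*
 * Registered as an early irqchip initialiser; the generic irqchip OF
 * machinery calls wakeupgen_init() for the "ti,omap4-wugen-mpu" node.
 */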
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);