1 // SPDX-License-Identifier: GPL-2.0-only
3 * OMAP WakeupGen Source file
5 * OMAP WakeupGen is the interrupt controller extension used along
6 * with ARM GIC to wake the CPU out from low power states on
7 * external interrupts. It is responsible for generating wakeup
8 * event from the incoming interrupts and enable bits. It is
9 * implemented in MPU always ON power domain. During normal operation,
10 * WakeupGen delivers external interrupts directly to the GIC.
12 * Copyright (C) 2011 Texas Instruments, Inc.
13 * Santosh Shilimkar <santosh.shilimkar@ti.com>
16 #include <linux/kernel.h>
17 #include <linux/init.h>
19 #include <linux/irq.h>
20 #include <linux/irqchip.h>
21 #include <linux/irqdomain.h>
22 #include <linux/of_address.h>
23 #include <linux/platform_device.h>
24 #include <linux/cpu.h>
25 #include <linux/notifier.h>
26 #include <linux/cpu_pm.h>
28 #include "omap-wakeupgen.h"
29 #include "omap-secure.h"
32 #include "omap4-sar-layout.h"
/* AM43xx WakeupGen geometry: 7 banks of 32 bits -> 224 wakeup IRQs */
#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
/* Worst-case geometry across supported SoCs, used to size static arrays */
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
/* Default geometry (OMAP5/DRA7 class): 5 banks, 160 IRQs */
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
/* All-zero masks every wakeup source; all-ones unmasks every source */
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
/* Stride between the CPU0 and CPU1 enable-register banks */
#define CPU_ENA_OFFSET		0x400
/* OMAP4 geometry: 4 banks of 32 bits -> 128 IRQs */
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128
/* sys_nirq pins are wired straight to WakeupGen at these hwirq numbers */
#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
#define SYS_NIRQ2_EXT_SYS_IRQ_2	119
/* MMIO base of the WakeupGen register block (mapped in wakeupgen_init) */
static void __iomem *wakeupgen_base;
/* Base of SAR RAM used for context save across low-power states */
static void __iomem *sar_base;
/* Serialises all read-modify-write access to the WakeupGen enable banks */
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
/* Per-hwirq target CPU; all IRQs are routed to the boot CPU at init */
static unsigned int irq_target_cpu[MAX_IRQS];
/* Actual geometry for the running SoC, overridden in wakeupgen_init */
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
/* Non-zero when the secure dispatcher APIs are usable (OMAP44xx) */
static unsigned int omap_secure_apis;

/* AM43xx context-save area: CPU0 enable masks saved across power-off */
static unsigned int wakeupgen_context[MAX_NR_REG_BANKS];
/* Per-SoC hooks for saving/restoring WakeupGen context around cluster PM */
struct omap_wakeupgen_ops {
	void (*save_context)(void);	/* called on CPU_CLUSTER_PM_ENTER */
	void (*restore_context)(void);	/* called on CPU_CLUSTER_PM_EXIT */
};

/* Selected in wakeupgen_init() based on the detected SoC */
static struct omap_wakeupgen_ops *wakeupgen_ops;
73 * Static helper functions.
75 static inline u32
wakeupgen_readl(u8 idx
, u32 cpu
)
77 return readl_relaxed(wakeupgen_base
+ OMAP_WKG_ENB_A_0
+
78 (cpu
* CPU_ENA_OFFSET
) + (idx
* 4));
81 static inline void wakeupgen_writel(u32 val
, u8 idx
, u32 cpu
)
83 writel_relaxed(val
, wakeupgen_base
+ OMAP_WKG_ENB_A_0
+
84 (cpu
* CPU_ENA_OFFSET
) + (idx
* 4));
87 static inline void sar_writel(u32 val
, u32 offset
, u8 idx
)
89 writel_relaxed(val
, sar_base
+ offset
+ (idx
* 4));
92 static inline int _wakeupgen_get_irq_info(u32 irq
, u32
*bit_posn
, u8
*reg_index
)
95 * Each WakeupGen register controls 32 interrupt.
96 * i.e. 1 bit per SPI IRQ
98 *reg_index
= irq
>> 5;
99 *bit_posn
= irq
%= 32;
104 static void _wakeupgen_clear(unsigned int irq
, unsigned int cpu
)
109 if (_wakeupgen_get_irq_info(irq
, &bit_number
, &i
))
112 val
= wakeupgen_readl(i
, cpu
);
113 val
&= ~BIT(bit_number
);
114 wakeupgen_writel(val
, i
, cpu
);
117 static void _wakeupgen_set(unsigned int irq
, unsigned int cpu
)
122 if (_wakeupgen_get_irq_info(irq
, &bit_number
, &i
))
125 val
= wakeupgen_readl(i
, cpu
);
126 val
|= BIT(bit_number
);
127 wakeupgen_writel(val
, i
, cpu
);
131 * Architecture specific Mask extension
133 static void wakeupgen_mask(struct irq_data
*d
)
137 raw_spin_lock_irqsave(&wakeupgen_lock
, flags
);
138 _wakeupgen_clear(d
->hwirq
, irq_target_cpu
[d
->hwirq
]);
139 raw_spin_unlock_irqrestore(&wakeupgen_lock
, flags
);
140 irq_chip_mask_parent(d
);
144 * Architecture specific Unmask extension
146 static void wakeupgen_unmask(struct irq_data
*d
)
150 raw_spin_lock_irqsave(&wakeupgen_lock
, flags
);
151 _wakeupgen_set(d
->hwirq
, irq_target_cpu
[d
->hwirq
]);
152 raw_spin_unlock_irqrestore(&wakeupgen_lock
, flags
);
153 irq_chip_unmask_parent(d
);
157 * The sys_nirq pins bypass peripheral modules and are wired directly
158 * to MPUSS wakeupgen. They get automatically inverted for GIC.
160 static int wakeupgen_irq_set_type(struct irq_data
*d
, unsigned int type
)
162 bool inverted
= false;
165 case IRQ_TYPE_LEVEL_LOW
:
166 type
&= ~IRQ_TYPE_LEVEL_MASK
;
167 type
|= IRQ_TYPE_LEVEL_HIGH
;
170 case IRQ_TYPE_EDGE_FALLING
:
171 type
&= ~IRQ_TYPE_EDGE_BOTH
;
172 type
|= IRQ_TYPE_EDGE_RISING
;
179 if (inverted
&& d
->hwirq
!= SYS_NIRQ1_EXT_SYS_IRQ_1
&&
180 d
->hwirq
!= SYS_NIRQ2_EXT_SYS_IRQ_2
)
181 pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
184 return irq_chip_set_type_parent(d
, type
);
#ifdef CONFIG_HOTPLUG_CPU
/* Per-CPU snapshot of all enable banks, taken while a CPU is offlined */
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
190 static void _wakeupgen_save_masks(unsigned int cpu
)
194 for (i
= 0; i
< irq_banks
; i
++)
195 per_cpu(irqmasks
, cpu
)[i
] = wakeupgen_readl(i
, cpu
);
198 static void _wakeupgen_restore_masks(unsigned int cpu
)
202 for (i
= 0; i
< irq_banks
; i
++)
203 wakeupgen_writel(per_cpu(irqmasks
, cpu
)[i
], i
, cpu
);
206 static void _wakeupgen_set_all(unsigned int cpu
, unsigned int reg
)
210 for (i
= 0; i
< irq_banks
; i
++)
211 wakeupgen_writel(reg
, i
, cpu
);
215 * Mask or unmask all interrupts on given CPU.
216 * 0 = Mask all interrupts on the 'cpu'
217 * 1 = Unmask all interrupts on the 'cpu'
218 * Ensure that the initial mask is maintained. This is faster than
219 * iterating through GIC registers to arrive at the correct masks.
221 static void wakeupgen_irqmask_all(unsigned int cpu
, unsigned int set
)
225 raw_spin_lock_irqsave(&wakeupgen_lock
, flags
);
227 _wakeupgen_save_masks(cpu
);
228 _wakeupgen_set_all(cpu
, WKG_MASK_ALL
);
230 _wakeupgen_set_all(cpu
, WKG_UNMASK_ALL
);
231 _wakeupgen_restore_masks(cpu
);
233 raw_spin_unlock_irqrestore(&wakeupgen_lock
, flags
);
238 static inline void omap4_irq_save_context(void)
242 if (omap_rev() == OMAP4430_REV_ES1_0
)
245 for (i
= 0; i
< irq_banks
; i
++) {
246 /* Save the CPUx interrupt mask for IRQ 0 to 127 */
247 val
= wakeupgen_readl(i
, 0);
248 sar_writel(val
, WAKEUPGENENB_OFFSET_CPU0
, i
);
249 val
= wakeupgen_readl(i
, 1);
250 sar_writel(val
, WAKEUPGENENB_OFFSET_CPU1
, i
);
253 * Disable the secure interrupts for CPUx. The restore
254 * code blindly restores secure and non-secure interrupt
255 * masks from SAR RAM. Secure interrupts are not suppose
256 * to be enabled from HLOS. So overwrite the SAR location
257 * so that the secure interrupt remains disabled.
259 sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0
, i
);
260 sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1
, i
);
263 /* Save AuxBoot* registers */
264 val
= readl_relaxed(wakeupgen_base
+ OMAP_AUX_CORE_BOOT_0
);
265 writel_relaxed(val
, sar_base
+ AUXCOREBOOT0_OFFSET
);
266 val
= readl_relaxed(wakeupgen_base
+ OMAP_AUX_CORE_BOOT_1
);
267 writel_relaxed(val
, sar_base
+ AUXCOREBOOT1_OFFSET
);
269 /* Save SyncReq generation logic */
270 val
= readl_relaxed(wakeupgen_base
+ OMAP_PTMSYNCREQ_MASK
);
271 writel_relaxed(val
, sar_base
+ PTMSYNCREQ_MASK_OFFSET
);
272 val
= readl_relaxed(wakeupgen_base
+ OMAP_PTMSYNCREQ_EN
);
273 writel_relaxed(val
, sar_base
+ PTMSYNCREQ_EN_OFFSET
);
275 /* Set the Backup Bit Mask status */
276 val
= readl_relaxed(sar_base
+ SAR_BACKUP_STATUS_OFFSET
);
277 val
|= SAR_BACKUP_STATUS_WAKEUPGEN
;
278 writel_relaxed(val
, sar_base
+ SAR_BACKUP_STATUS_OFFSET
);
282 static inline void omap5_irq_save_context(void)
286 for (i
= 0; i
< irq_banks
; i
++) {
287 /* Save the CPUx interrupt mask for IRQ 0 to 159 */
288 val
= wakeupgen_readl(i
, 0);
289 sar_writel(val
, OMAP5_WAKEUPGENENB_OFFSET_CPU0
, i
);
290 val
= wakeupgen_readl(i
, 1);
291 sar_writel(val
, OMAP5_WAKEUPGENENB_OFFSET_CPU1
, i
);
292 sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0
, i
);
293 sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1
, i
);
296 /* Save AuxBoot* registers */
297 val
= readl_relaxed(wakeupgen_base
+ OMAP_AUX_CORE_BOOT_0
);
298 writel_relaxed(val
, sar_base
+ OMAP5_AUXCOREBOOT0_OFFSET
);
299 val
= readl_relaxed(wakeupgen_base
+ OMAP_AUX_CORE_BOOT_0
);
300 writel_relaxed(val
, sar_base
+ OMAP5_AUXCOREBOOT1_OFFSET
);
302 /* Set the Backup Bit Mask status */
303 val
= readl_relaxed(sar_base
+ OMAP5_SAR_BACKUP_STATUS_OFFSET
);
304 val
|= SAR_BACKUP_STATUS_WAKEUPGEN
;
305 writel_relaxed(val
, sar_base
+ OMAP5_SAR_BACKUP_STATUS_OFFSET
);
309 static inline void am43xx_irq_save_context(void)
313 for (i
= 0; i
< irq_banks
; i
++) {
314 wakeupgen_context
[i
] = wakeupgen_readl(i
, 0);
315 wakeupgen_writel(0, i
, CPU0_ID
);
320 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
321 * ROM code. WakeupGen IP is integrated along with GIC to manage the
322 * interrupt wakeups from CPU low power states. It manages
323 * masking/unmasking of Shared peripheral interrupts(SPI). So the
324 * interrupt enable/disable control should be in sync and consistent
325 * at WakeupGen and GIC so that interrupts are not lost.
327 static void irq_save_context(void)
329 /* DRA7 has no SAR to save */
333 if (wakeupgen_ops
&& wakeupgen_ops
->save_context
)
334 wakeupgen_ops
->save_context();
338 * Clear WakeupGen SAR backup status.
340 static void irq_sar_clear(void)
343 u32 offset
= SAR_BACKUP_STATUS_OFFSET
;
344 /* DRA7 has no SAR to save */
348 if (soc_is_omap54xx())
349 offset
= OMAP5_SAR_BACKUP_STATUS_OFFSET
;
351 val
= readl_relaxed(sar_base
+ offset
);
352 val
&= ~SAR_BACKUP_STATUS_WAKEUPGEN
;
353 writel_relaxed(val
, sar_base
+ offset
);
356 static void am43xx_irq_restore_context(void)
360 for (i
= 0; i
< irq_banks
; i
++)
361 wakeupgen_writel(wakeupgen_context
[i
], i
, CPU0_ID
);
364 static void irq_restore_context(void)
366 if (wakeupgen_ops
&& wakeupgen_ops
->restore_context
)
367 wakeupgen_ops
->restore_context();
371 * Save GIC and Wakeupgen interrupt context using secure API
372 * for HS/EMU devices.
374 static void irq_save_secure_context(void)
378 ret
= omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX
,
381 if (ret
!= API_HAL_RET_VALUE_OK
)
382 pr_err("GIC and Wakeupgen context save failed\n");
385 /* Define ops for context save and restore for each SoC */
386 static struct omap_wakeupgen_ops omap4_wakeupgen_ops
= {
387 .save_context
= omap4_irq_save_context
,
388 .restore_context
= irq_sar_clear
,
391 static struct omap_wakeupgen_ops omap5_wakeupgen_ops
= {
392 .save_context
= omap5_irq_save_context
,
393 .restore_context
= irq_sar_clear
,
396 static struct omap_wakeupgen_ops am43xx_wakeupgen_ops
= {
397 .save_context
= am43xx_irq_save_context
,
398 .restore_context
= am43xx_irq_restore_context
,
401 static struct omap_wakeupgen_ops omap4_wakeupgen_ops
= {};
402 static struct omap_wakeupgen_ops omap5_wakeupgen_ops
= {};
403 static struct omap_wakeupgen_ops am43xx_wakeupgen_ops
= {};
406 #ifdef CONFIG_HOTPLUG_CPU
407 static int omap_wakeupgen_cpu_online(unsigned int cpu
)
409 wakeupgen_irqmask_all(cpu
, 0);
413 static int omap_wakeupgen_cpu_dead(unsigned int cpu
)
415 wakeupgen_irqmask_all(cpu
, 1);
419 static void __init
irq_hotplug_init(void)
421 cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN
, "arm/omap-wake:online",
422 omap_wakeupgen_cpu_online
, NULL
);
423 cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD
,
424 "arm/omap-wake:dead", NULL
,
425 omap_wakeupgen_cpu_dead
);
428 static void __init
irq_hotplug_init(void)
433 static int irq_notifier(struct notifier_block
*self
, unsigned long cmd
, void *v
)
436 case CPU_CLUSTER_PM_ENTER
:
437 if (omap_type() == OMAP2_DEVICE_TYPE_GP
|| soc_is_am43xx())
440 irq_save_secure_context();
442 case CPU_CLUSTER_PM_EXIT
:
443 if (omap_type() == OMAP2_DEVICE_TYPE_GP
|| soc_is_am43xx())
444 irq_restore_context();
450 static struct notifier_block irq_notifier_block
= {
451 .notifier_call
= irq_notifier
,
454 static void __init
irq_pm_init(void)
456 /* FIXME: Remove this when MPU OSWR support is added */
457 if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE
))
458 cpu_pm_register_notifier(&irq_notifier_block
);
461 static void __init
irq_pm_init(void)
465 void __iomem
*omap_get_wakeupgen_base(void)
467 return wakeupgen_base
;
470 int omap_secure_apis_support(void)
472 return omap_secure_apis
;
475 static struct irq_chip wakeupgen_chip
= {
477 .irq_eoi
= irq_chip_eoi_parent
,
478 .irq_mask
= wakeupgen_mask
,
479 .irq_unmask
= wakeupgen_unmask
,
480 .irq_retrigger
= irq_chip_retrigger_hierarchy
,
481 .irq_set_type
= wakeupgen_irq_set_type
,
482 .flags
= IRQCHIP_SKIP_SET_WAKE
| IRQCHIP_MASK_ON_SUSPEND
,
484 .irq_set_affinity
= irq_chip_set_affinity_parent
,
488 static int wakeupgen_domain_translate(struct irq_domain
*d
,
489 struct irq_fwspec
*fwspec
,
490 unsigned long *hwirq
,
493 if (is_of_node(fwspec
->fwnode
)) {
494 if (fwspec
->param_count
!= 3)
497 /* No PPI should point to this domain */
498 if (fwspec
->param
[0] != 0)
501 *hwirq
= fwspec
->param
[1];
502 *type
= fwspec
->param
[2];
509 static int wakeupgen_domain_alloc(struct irq_domain
*domain
,
511 unsigned int nr_irqs
, void *data
)
513 struct irq_fwspec
*fwspec
= data
;
514 struct irq_fwspec parent_fwspec
;
515 irq_hw_number_t hwirq
;
518 if (fwspec
->param_count
!= 3)
519 return -EINVAL
; /* Not GIC compliant */
520 if (fwspec
->param
[0] != 0)
521 return -EINVAL
; /* No PPI should point to this domain */
523 hwirq
= fwspec
->param
[1];
524 if (hwirq
>= MAX_IRQS
)
525 return -EINVAL
; /* Can't deal with this */
527 for (i
= 0; i
< nr_irqs
; i
++)
528 irq_domain_set_hwirq_and_chip(domain
, virq
+ i
, hwirq
+ i
,
529 &wakeupgen_chip
, NULL
);
531 parent_fwspec
= *fwspec
;
532 parent_fwspec
.fwnode
= domain
->parent
->fwnode
;
533 return irq_domain_alloc_irqs_parent(domain
, virq
, nr_irqs
,
/* Hierarchical domain ops: translate/alloc above, common free helper */
static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};
544 * Initialise the wakeupgen module.
546 static int __init
wakeupgen_init(struct device_node
*node
,
547 struct device_node
*parent
)
549 struct irq_domain
*parent_domain
, *domain
;
551 unsigned int boot_cpu
= smp_processor_id();
555 pr_err("%pOF: no parent, giving up\n", node
);
559 parent_domain
= irq_find_host(parent
);
560 if (!parent_domain
) {
561 pr_err("%pOF: unable to obtain parent domain\n", node
);
564 /* Not supported on OMAP4 ES1.0 silicon */
565 if (omap_rev() == OMAP4430_REV_ES1_0
) {
566 WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
570 /* Static mapping, never released */
571 wakeupgen_base
= of_iomap(node
, 0);
572 if (WARN_ON(!wakeupgen_base
))
575 if (cpu_is_omap44xx()) {
576 irq_banks
= OMAP4_NR_BANKS
;
577 max_irqs
= OMAP4_NR_IRQS
;
578 omap_secure_apis
= 1;
579 wakeupgen_ops
= &omap4_wakeupgen_ops
;
580 } else if (soc_is_omap54xx()) {
581 wakeupgen_ops
= &omap5_wakeupgen_ops
;
582 } else if (soc_is_am43xx()) {
583 irq_banks
= AM43XX_NR_REG_BANKS
;
584 max_irqs
= AM43XX_IRQS
;
585 wakeupgen_ops
= &am43xx_wakeupgen_ops
;
588 domain
= irq_domain_add_hierarchy(parent_domain
, 0, max_irqs
,
589 node
, &wakeupgen_domain_ops
,
592 iounmap(wakeupgen_base
);
596 /* Clear all IRQ bitmasks at wakeupGen level */
597 for (i
= 0; i
< irq_banks
; i
++) {
598 wakeupgen_writel(0, i
, CPU0_ID
);
599 if (!soc_is_am43xx())
600 wakeupgen_writel(0, i
, CPU1_ID
);
604 * FIXME: Add support to set_smp_affinity() once the core
605 * GIC code has necessary hooks in place.
608 /* Associate all the IRQs to boot CPU like GIC init does. */
609 for (i
= 0; i
< max_irqs
; i
++)
610 irq_target_cpu
[i
] = boot_cpu
;
613 * Enables OMAP5 ES2 PM Mode using ES2_PM_MODE in AMBA_IF_MODE
614 * 0x0: ES1 behavior, CPU cores would enter and exit OFF mode together.
615 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
617 * This needs to be set one time thanks to always ON domain.
619 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
620 * ES2.0, and the same is applicable for DRA7.
622 if (soc_is_omap54xx() || soc_is_dra7xx()) {
623 val
= __raw_readl(wakeupgen_base
+ OMAP_AMBA_IF_MODE
);
625 omap_smc1(OMAP5_MON_AMBA_IF_INDEX
, val
);
631 sar_base
= omap4_get_sar_ram_base();
/* Probe via the "ti,omap4-wugen-mpu" DT compatible */
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);