1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7038 style Level 1 interrupt controller driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 * Author: Kevin Cernekee
 */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/bitops.h>
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
16 #include <linux/ioport.h>
17 #include <linux/irq.h>
18 #include <linux/irqdomain.h>
19 #include <linux/module.h>
21 #include <linux/of_irq.h>
22 #include <linux/of_address.h>
23 #include <linux/of_platform.h>
24 #include <linux/platform_device.h>
25 #include <linux/slab.h>
26 #include <linux/smp.h>
27 #include <linux/types.h>
28 #include <linux/irqchip.h>
29 #include <linux/irqchip/chained_irq.h>
30 #include <linux/syscore_ops.h>
32 #include <asm/smp_plat.h>
/*
 * Each register word covers 32 interrupt lines; each word has four
 * consecutive registers (STATUS, MASK_STATUS, MASK_SET, MASK_CLEAR).
 * MAX_WORDS bounds the per-controller word count (used to size the
 * wake/forward/affinity tables below and range-checked in init_one).
 */
#define IRQS_PER_WORD		32
#define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 4)
#define MAX_WORDS		8
39 struct bcm7038_l1_cpu
;
41 struct bcm7038_l1_chip
{
44 struct irq_domain
*domain
;
45 struct bcm7038_l1_cpu
*cpus
[NR_CPUS
];
46 #ifdef CONFIG_PM_SLEEP
47 struct list_head list
;
48 u32 wake_mask
[MAX_WORDS
];
50 u32 irq_fwd_mask
[MAX_WORDS
];
51 u8 affinity
[MAX_WORDS
* IRQS_PER_WORD
];
54 struct bcm7038_l1_cpu
{
55 void __iomem
*map_base
;
/*
 * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another:
 *
 * 7038:
 *   0x1000_1400: W0_STATUS
 *   0x1000_1404: W1_STATUS
 *   0x1000_1408: W0_MASK_STATUS
 *   0x1000_140c: W1_MASK_STATUS
 *   0x1000_1410: W0_MASK_SET
 *   0x1000_1414: W1_MASK_SET
 *   0x1000_1418: W0_MASK_CLEAR
 *   0x1000_141c: W1_MASK_CLEAR
 *
 * 7445:
 *   0xf03e_1500: W0_STATUS
 *   0xf03e_1504: W1_STATUS
 *   0xf03e_1508: W2_STATUS
 *   0xf03e_150c: W3_STATUS
 *   0xf03e_1510: W4_STATUS
 *   0xf03e_1514: W0_MASK_STATUS
 *   0xf03e_1518: W1_MASK_STATUS
 *   [...]
 */
83 static inline unsigned int reg_status(struct bcm7038_l1_chip
*intc
,
86 return (0 * intc
->n_words
+ word
) * sizeof(u32
);
89 static inline unsigned int reg_mask_status(struct bcm7038_l1_chip
*intc
,
92 return (1 * intc
->n_words
+ word
) * sizeof(u32
);
95 static inline unsigned int reg_mask_set(struct bcm7038_l1_chip
*intc
,
98 return (2 * intc
->n_words
+ word
) * sizeof(u32
);
101 static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip
*intc
,
104 return (3 * intc
->n_words
+ word
) * sizeof(u32
);
107 static inline u32
l1_readl(void __iomem
*reg
)
109 if (IS_ENABLED(CONFIG_MIPS
) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN
))
110 return ioread32be(reg
);
115 static inline void l1_writel(u32 val
, void __iomem
*reg
)
117 if (IS_ENABLED(CONFIG_MIPS
) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN
))
118 iowrite32be(val
, reg
);
123 static void bcm7038_l1_irq_handle(struct irq_desc
*desc
)
125 struct bcm7038_l1_chip
*intc
= irq_desc_get_handler_data(desc
);
126 struct bcm7038_l1_cpu
*cpu
;
127 struct irq_chip
*chip
= irq_desc_get_chip(desc
);
131 cpu
= intc
->cpus
[cpu_logical_map(smp_processor_id())];
136 chained_irq_enter(chip
, desc
);
138 for (idx
= 0; idx
< intc
->n_words
; idx
++) {
139 int base
= idx
* IRQS_PER_WORD
;
140 unsigned long pending
, flags
;
143 raw_spin_lock_irqsave(&intc
->lock
, flags
);
144 pending
= l1_readl(cpu
->map_base
+ reg_status(intc
, idx
)) &
145 ~cpu
->mask_cache
[idx
];
146 raw_spin_unlock_irqrestore(&intc
->lock
, flags
);
148 for_each_set_bit(hwirq
, &pending
, IRQS_PER_WORD
) {
149 generic_handle_irq(irq_find_mapping(intc
->domain
,
154 chained_irq_exit(chip
, desc
);
157 static void __bcm7038_l1_unmask(struct irq_data
*d
, unsigned int cpu_idx
)
159 struct bcm7038_l1_chip
*intc
= irq_data_get_irq_chip_data(d
);
160 u32 word
= d
->hwirq
/ IRQS_PER_WORD
;
161 u32 mask
= BIT(d
->hwirq
% IRQS_PER_WORD
);
163 intc
->cpus
[cpu_idx
]->mask_cache
[word
] &= ~mask
;
164 l1_writel(mask
, intc
->cpus
[cpu_idx
]->map_base
+
165 reg_mask_clr(intc
, word
));
168 static void __bcm7038_l1_mask(struct irq_data
*d
, unsigned int cpu_idx
)
170 struct bcm7038_l1_chip
*intc
= irq_data_get_irq_chip_data(d
);
171 u32 word
= d
->hwirq
/ IRQS_PER_WORD
;
172 u32 mask
= BIT(d
->hwirq
% IRQS_PER_WORD
);
174 intc
->cpus
[cpu_idx
]->mask_cache
[word
] |= mask
;
175 l1_writel(mask
, intc
->cpus
[cpu_idx
]->map_base
+
176 reg_mask_set(intc
, word
));
179 static void bcm7038_l1_unmask(struct irq_data
*d
)
181 struct bcm7038_l1_chip
*intc
= irq_data_get_irq_chip_data(d
);
184 raw_spin_lock_irqsave(&intc
->lock
, flags
);
185 __bcm7038_l1_unmask(d
, intc
->affinity
[d
->hwirq
]);
186 raw_spin_unlock_irqrestore(&intc
->lock
, flags
);
189 static void bcm7038_l1_mask(struct irq_data
*d
)
191 struct bcm7038_l1_chip
*intc
= irq_data_get_irq_chip_data(d
);
194 raw_spin_lock_irqsave(&intc
->lock
, flags
);
195 __bcm7038_l1_mask(d
, intc
->affinity
[d
->hwirq
]);
196 raw_spin_unlock_irqrestore(&intc
->lock
, flags
);
199 static int bcm7038_l1_set_affinity(struct irq_data
*d
,
200 const struct cpumask
*dest
,
203 struct bcm7038_l1_chip
*intc
= irq_data_get_irq_chip_data(d
);
205 irq_hw_number_t hw
= d
->hwirq
;
206 u32 word
= hw
/ IRQS_PER_WORD
;
207 u32 mask
= BIT(hw
% IRQS_PER_WORD
);
208 unsigned int first_cpu
= cpumask_any_and(dest
, cpu_online_mask
);
211 raw_spin_lock_irqsave(&intc
->lock
, flags
);
213 was_disabled
= !!(intc
->cpus
[intc
->affinity
[hw
]]->mask_cache
[word
] &
215 __bcm7038_l1_mask(d
, intc
->affinity
[hw
]);
216 intc
->affinity
[hw
] = first_cpu
;
218 __bcm7038_l1_unmask(d
, first_cpu
);
220 raw_spin_unlock_irqrestore(&intc
->lock
, flags
);
221 irq_data_update_effective_affinity(d
, cpumask_of(first_cpu
));
#ifdef CONFIG_SMP
/*
 * CPU hot-unplug callback: migrate interrupts targeting the dying CPU,
 * either to the remaining CPUs in the affinity mask or, if this was the
 * only target, to the lowest-numbered online CPU.
 */
static void bcm7038_l1_cpu_offline(struct irq_data *d)
{
	struct cpumask *mask = irq_data_get_affinity_mask(d);
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	/* This CPU was not on the affinity mask */
	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * Multiple CPU affinity, remove this CPU from the affinity
		 * mask
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Only CPU, put on the lowest online CPU */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(d, &new_affinity, false);
}
#endif
253 static int __init
bcm7038_l1_init_one(struct device_node
*dn
,
255 struct bcm7038_l1_chip
*intc
)
259 struct bcm7038_l1_cpu
*cpu
;
260 unsigned int i
, n_words
, parent_irq
;
263 if (of_address_to_resource(dn
, idx
, &res
))
265 sz
= resource_size(&res
);
266 n_words
= sz
/ REG_BYTES_PER_IRQ_WORD
;
268 if (n_words
> MAX_WORDS
)
270 else if (!intc
->n_words
)
271 intc
->n_words
= n_words
;
272 else if (intc
->n_words
!= n_words
)
275 ret
= of_property_read_u32_array(dn
, "brcm,int-fwd-mask",
276 intc
->irq_fwd_mask
, n_words
);
277 if (ret
!= 0 && ret
!= -EINVAL
) {
278 /* property exists but has the wrong number of words */
279 pr_err("invalid brcm,int-fwd-mask property\n");
283 cpu
= intc
->cpus
[idx
] = kzalloc(sizeof(*cpu
) + n_words
* sizeof(u32
),
288 cpu
->map_base
= ioremap(res
.start
, sz
);
292 for (i
= 0; i
< n_words
; i
++) {
293 l1_writel(~intc
->irq_fwd_mask
[i
],
294 cpu
->map_base
+ reg_mask_set(intc
, i
));
295 l1_writel(intc
->irq_fwd_mask
[i
],
296 cpu
->map_base
+ reg_mask_clr(intc
, i
));
297 cpu
->mask_cache
[i
] = ~intc
->irq_fwd_mask
[i
];
300 parent_irq
= irq_of_parse_and_map(dn
, idx
);
302 pr_err("failed to map parent interrupt %d\n", parent_irq
);
306 if (of_property_read_bool(dn
, "brcm,irq-can-wake"))
307 enable_irq_wake(parent_irq
);
309 irq_set_chained_handler_and_data(parent_irq
, bcm7038_l1_irq_handle
,
315 #ifdef CONFIG_PM_SLEEP
317 * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
318 * used because the struct chip_type suspend/resume hooks are not called
319 * unless chip_type is hooked onto a generic_chip. Since this driver does
320 * not use generic_chip, we need to manually hook our resume/suspend to
323 static LIST_HEAD(bcm7038_l1_intcs_list
);
324 static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock
);
326 static int bcm7038_l1_suspend(void)
328 struct bcm7038_l1_chip
*intc
;
332 /* Wakeup interrupt should only come from the boot cpu */
334 boot_cpu
= cpu_logical_map(0);
339 list_for_each_entry(intc
, &bcm7038_l1_intcs_list
, list
) {
340 for (word
= 0; word
< intc
->n_words
; word
++) {
341 val
= intc
->wake_mask
[word
] | intc
->irq_fwd_mask
[word
];
343 intc
->cpus
[boot_cpu
]->map_base
+ reg_mask_set(intc
, word
));
345 intc
->cpus
[boot_cpu
]->map_base
+ reg_mask_clr(intc
, word
));
352 static void bcm7038_l1_resume(void)
354 struct bcm7038_l1_chip
*intc
;
358 boot_cpu
= cpu_logical_map(0);
363 list_for_each_entry(intc
, &bcm7038_l1_intcs_list
, list
) {
364 for (word
= 0; word
< intc
->n_words
; word
++) {
365 l1_writel(intc
->cpus
[boot_cpu
]->mask_cache
[word
],
366 intc
->cpus
[boot_cpu
]->map_base
+ reg_mask_set(intc
, word
));
367 l1_writel(~intc
->cpus
[boot_cpu
]->mask_cache
[word
],
368 intc
->cpus
[boot_cpu
]->map_base
+ reg_mask_clr(intc
, word
));
373 static struct syscore_ops bcm7038_l1_syscore_ops
= {
374 .suspend
= bcm7038_l1_suspend
,
375 .resume
= bcm7038_l1_resume
,
378 static int bcm7038_l1_set_wake(struct irq_data
*d
, unsigned int on
)
380 struct bcm7038_l1_chip
*intc
= irq_data_get_irq_chip_data(d
);
382 u32 word
= d
->hwirq
/ IRQS_PER_WORD
;
383 u32 mask
= BIT(d
->hwirq
% IRQS_PER_WORD
);
385 raw_spin_lock_irqsave(&intc
->lock
, flags
);
387 intc
->wake_mask
[word
] |= mask
;
389 intc
->wake_mask
[word
] &= ~mask
;
390 raw_spin_unlock_irqrestore(&intc
->lock
, flags
);
396 static struct irq_chip bcm7038_l1_irq_chip
= {
397 .name
= "bcm7038-l1",
398 .irq_mask
= bcm7038_l1_mask
,
399 .irq_unmask
= bcm7038_l1_unmask
,
400 .irq_set_affinity
= bcm7038_l1_set_affinity
,
402 .irq_cpu_offline
= bcm7038_l1_cpu_offline
,
404 #ifdef CONFIG_PM_SLEEP
405 .irq_set_wake
= bcm7038_l1_set_wake
,
409 static int bcm7038_l1_map(struct irq_domain
*d
, unsigned int virq
,
410 irq_hw_number_t hw_irq
)
412 struct bcm7038_l1_chip
*intc
= d
->host_data
;
413 u32 mask
= BIT(hw_irq
% IRQS_PER_WORD
);
414 u32 word
= hw_irq
/ IRQS_PER_WORD
;
416 if (intc
->irq_fwd_mask
[word
] & mask
)
419 irq_set_chip_and_handler(virq
, &bcm7038_l1_irq_chip
, handle_level_irq
);
420 irq_set_chip_data(virq
, d
->host_data
);
421 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq
)));
425 static const struct irq_domain_ops bcm7038_l1_domain_ops
= {
426 .xlate
= irq_domain_xlate_onecell
,
427 .map
= bcm7038_l1_map
,
430 static int __init
bcm7038_l1_of_init(struct device_node
*dn
,
431 struct device_node
*parent
)
433 struct bcm7038_l1_chip
*intc
;
436 intc
= kzalloc(sizeof(*intc
), GFP_KERNEL
);
440 raw_spin_lock_init(&intc
->lock
);
441 for_each_possible_cpu(idx
) {
442 ret
= bcm7038_l1_init_one(dn
, idx
, intc
);
446 pr_err("failed to remap intc L1 registers\n");
451 intc
->domain
= irq_domain_add_linear(dn
, IRQS_PER_WORD
* intc
->n_words
,
452 &bcm7038_l1_domain_ops
,
459 #ifdef CONFIG_PM_SLEEP
460 /* Add bcm7038_l1_chip into a list */
461 raw_spin_lock(&bcm7038_l1_intcs_lock
);
462 list_add_tail(&intc
->list
, &bcm7038_l1_intcs_list
);
463 raw_spin_unlock(&bcm7038_l1_intcs_lock
);
465 if (list_is_singular(&bcm7038_l1_intcs_list
))
466 register_syscore_ops(&bcm7038_l1_syscore_ops
);
469 pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
470 dn
, IRQS_PER_WORD
* intc
->n_words
);
475 for_each_possible_cpu(idx
) {
476 struct bcm7038_l1_cpu
*cpu
= intc
->cpus
[idx
];
480 iounmap(cpu
->map_base
);
489 IRQCHIP_DECLARE(bcm7038_l1
, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init
);