/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);
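
/*
 * The rwlocks serialize updates to the per-core CIU enable registers:
 * enables take them for read (cores do not disturb each other), while
 * disables and affinity changes take them for write.  The spinlock
 * protects the single shared MSI enable register.
 */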

static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;

	/* There is a race here.  We should fix it.  */

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
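
/*
 * CIU0 (OCTEON_IRQ_WORKQ0 and up) is summarized on CP0 line IP2.
 * Masking uses the per-core CVMX_CIU_INTX_EN0 registers; the ack/eoi
 * pair simply masks and unmasks IP2 on the local core.
 */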

static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time.  During an enable
	 * the cores don't interfere with each other.  During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
#ifdef CONFIG_SMP
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();

	local_irq_save(flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock(&octeon_irq_ciu0_rwlock);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock(&octeon_irq_ciu0_rwlock);

	return 0;
}
#endif

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
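
/*
 * CIU1 (OCTEON_IRQ_WDOG0 and up) mirrors CIU0 but is summarized on
 * CP0 line IP3 and uses the CVMX_CIU_INTX_EN1 registers.
 */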

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time.  During an enable
	 * the cores don't interfere with each other.  During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
#ifdef CONFIG_SMP
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();

	local_irq_save(flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock(&octeon_irq_ciu1_rwlock);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock(&octeon_irq_ciu1_rwlock);

	return 0;
}
#endif

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};
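
/*
 * MSI handling differs by chip family: PCI-based chips use the NPI
 * registers and have no per-vector mask, while PCIe-based chips keep
 * all 64 MSI enables in one NPEI register guarded by
 * octeon_irq_msi_lock.
 */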

#ifdef CONFIG_PCI_MSI

static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe.  Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed; the ack already cleared the MSI receive bit. */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually.  Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved.  MSI
		 * interrupts are always enabled and the ACK is
		 * assumed to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif
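
/*
 * arch_init_irq() binds every Octeon irq number to the matching
 * irq_chip and finally unmasks IP2/IP3 so the CIU summary interrupts
 * can be delivered.
 */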

void __init arch_init_irq(void)
{
	int irq;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
					 handle_percpu_irq);
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
					 handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif

	/* Enable the CIU summary lines (IP2 and IP3). */
	set_c0_status(0x300 << 2);
}
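
/*
 * Low level dispatch: work out which pending CP0 line fired, read the
 * matching CIU summary register for IP2/IP3, and hand the decoded
 * Linux irq number to do_IRQ().
 */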

asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
#ifdef CONFIG_SMP
	int coreid = cpu_logical_map(cpu);
#else
	int coreid = cvmx_get_core_num();
#endif
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < OCTEON_IRQ_WDOG0) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}
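
/*
 * Called on a CPU that is going offline: mask the per-core lines and
 * push any CIU interrupts still routed to this CPU onto the remaining
 * online CPUs.
 */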

void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif

	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}

#endif /* CONFIG_HOTPLUG_CPU */