/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2014 Cavium, Inc.
 */
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
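/*
 * Note (added commentary): each CPU keeps a software mirror of its CIU
 * enable registers so that read-modify-write sequences can be done in
 * memory and then flushed out with a single CSR write.  The per-CPU
 * raw spinlock serializes updates to a given CPU's mirror when other
 * CPUs modify it.
 */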
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
struct octeon_irq_ciu_domain_data {
	int num_sum;		/* number of sum registers (2 or 3). */
};
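/*
 * Note (added commentary): reverse map from (line, bit) in the SUM/EN
 * registers back to the Linux virq mapped there.  The entries are u8,
 * which is why octeon_irq_virq_in_range() below rejects virq numbers
 * of 256 or more.
 */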
static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
			u8 gpio_line;
		};
	};
	int current_cpu;	/* Next CPU expected to take this irq */
};
struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	unsigned int bit;
};
#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);

	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}
static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}
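/*
 * Note (added commentary): hwirq numbers in the CIU domains encode the
 * register position as (line << 6) | bit.  For example, forcing
 * OCTEON_IRQ_USB1 to line 1, bit 17 in octeon_irq_init_ciu() below
 * associates it with hwirq (1 << 6) | 17 == 81.
 */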
static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	return irq_domain_associate(domain, irq, line << 6 | bit);
}
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}
static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}
static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}
static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}
static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = false;
}
static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = true;
}
static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}
static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}
static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;

		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}
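/*
 * Note (added commentary): picks the CPU that should receive the next
 * interrupt.  For an irq with multi-CPU affinity this round-robins
 * delivery: cd->current_cpu remembers the previous target and the
 * search continues from there, wrapping through the online CPUs in
 * the affinity mask.
 */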
static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}
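/*
 * Note (added commentary): the CIU exposes one enable register pair per
 * core interrupt line, indexed (coreid * 2) for the core's IP2 line and
 * (coreid * 2 + 1) for its IP3 line.  As wired up here, line-0 (SUM0)
 * sources go out on IP2 via EN0 and line-1 (SUM1) sources on IP3 via
 * EN1, matching octeon_irq_ip2_ciu()/octeon_irq_ip3_ciu() below.  The
 * wmb() orders the mirror update before the CSR write that makes the
 * irq deliverable.
 */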
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
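/*
 * Note (added commentary): on chips with the EN*_W1{S,C} register
 * variants a single bit can be set or cleared atomically with one
 * write, so the spinlock and whole-register rewrite above are not
 * needed; the mirror is still maintained (with atomic set_bit/
 * clear_bit) for the dispatch fast path.
 */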
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}
/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}
static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}
static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}
/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;

		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;

		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}
/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}
/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}
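/*
 * Note (added commentary): octeon_irq_gpio_setup() translates the
 * generic trigger type into GPIO_BIT_CFG fields: int_type selects edge
 * (1) vs level (0) detection and rx_xor inverts the input, so the
 * level-low/falling-edge variants are handled as inverted
 * high/rising forms.
 */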
static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}
static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}
static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}
static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	return IRQ_SET_MASK_OK;
}
static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}
static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}
static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}
static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
{
	if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
		handle_edge_irq(irq, desc);
	else
		handle_level_irq(irq, desc);
}
#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}
static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}

	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}
static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		int index = octeon_coreid_for_cpu(cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
		}
	}
	return 0;
}
#endif
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}
static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};
static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55:	/* Timers */
		case 58:	/* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}
struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};
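/*
 * Note (added commentary): a two-cell interrupt specifier selects the
 * pin and the trigger type.  A hypothetical device tree fragment using
 * this binding might look like:
 *
 *	interrupt-parent = <&gpio>;
 *	interrupts = <4 8>;	(pin 4, active-low level)
 *
 * Trigger values follow the usual DT convention handled below:
 * 1 rising edge, 2 falling edge, 4 level high, 8 level low.
 */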
static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (d->of_node != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name, trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}
static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu >= dd->num_sum || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}
static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;
static bool octeon_irq_virq_in_range(unsigned int virq)
{
	/* We cannot let it overflow the mapping array. */
	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
		return true;

	WARN_ONCE(true, "virq out of range %u.\n", virq);
	return false;
}
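/*
 * Note (added commentary): line 0 bits 16..31 are the 16 GPIO sources;
 * they are deliberately left unmapped here so the separate GPIO
 * irq_domain (see octeon_irq_gpio_map() below) can claim them.
 */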
static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/* Don't map irq if it is reserved for GPIO. */
	if (line == 0 && bit >= 16 && bit < 32)
		return 0;

	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (line == 2) {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2,
				handle_level_irq);
	} else {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip,
				handle_level_irq);
	}
	return rv;
}
static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
	    octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				       octeon_irq_gpio_chip,
				       octeon_irq_handle_trigger);
	return r;
}
static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_gpio_xlat,
};
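/*
 * Note (added commentary): per-IPx dispatch.  Each handler reads its
 * SUM CSR, masks it against the enabled set (the per-CPU en_mirror for
 * IP2/IP3, the EN2 CSR for IP4), then feeds the highest pending bit
 * through octeon_irq_ciu_to_irq[] to recover the Linux irq for
 * do_IRQ().  These run from plat_irq_dispatch() at the bottom of this
 * file.
 */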
static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
static void octeon_irq_ip4_ciu(void)
{
	int coreid = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

	ciu_sum &= ciu_en;
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[2][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}
static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}
static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}
static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively. Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}
static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
	    && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 = octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
	if (r)
		goto err;

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	return r;
}
static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (gpiod) {
		/* gpio domain host_data is the base hwirq number. */
		gpiod->base_hwirq = base_hwirq;
		irq_domain_add_linear(
			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
	} else {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
		cvmx_write_csr(en_addr, mask);
	}
}
static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}
static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}
static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
#endif
static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}
static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);
}
static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu2_edge = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};
static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}
static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:	 /* IPD_DRP */
		case 8 ... 11: /* Timers */
		case 48: /* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53: /* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}
static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/*
	 * Don't map irq if it is reserved for GPIO.
	 * (Line 7 carries the GPIO lines.)
	 */
	if (line == 7)
		return 0;

	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2_edge,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);

	return 0;
}
static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu2_xlat,
};
static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum: accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}
static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum: accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}
static int __init octeon_irq_init_ciu2(
	struct device_node *ciu_node, struct device_node *parent)
{
	int i, r;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
	irq_set_default_host(ciu_domain);

	/* CIU2 */
	for (i = 0; i < 64; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 32; i++) {
		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
		if (r)
			goto err;
	}

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
	return 0;
err:
	return r;
}
struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};
static void octeon_irq_cib_enable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en |= 1ull << cd->bit;
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}
static void octeon_irq_cib_disable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en &= ~(1ull << cd->bit);
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}
static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	return IRQ_SET_MASK_OK;
}
static struct irq_chip octeon_irq_chip_cib = {
	.name = "CIB",
	.irq_enable = octeon_irq_cib_enable,
	.irq_disable = octeon_irq_cib_disable,
	.irq_mask = octeon_irq_cib_disable,
	.irq_unmask = octeon_irq_cib_enable,
	.irq_set_type = octeon_irq_cib_set_type,
};
static int octeon_irq_cib_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int type = 0;

	if (intsize == 2)
		type = intspec[1];

	switch (type) {
	case 0: /* unofficial value, but we might as well let it work. */
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = intspec[0];

	return 0;
}
static int octeon_irq_cib_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_cib_host_data *host_data = d->host_data;
	struct octeon_irq_cib_chip_data *cd;

	if (hw >= host_data->max_bits) {
		pr_err("ERROR: %s mapping %u is too big!\n",
		       d->of_node->name, (unsigned)hw);
		return -EINVAL;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	cd->host_data = host_data;
	cd->bit = hw;

	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
	return 0;
}
static struct irq_domain_ops octeon_irq_domain_cib_ops = {
	.map = octeon_irq_cib_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_cib_xlat,
};
/* Chain to real handler. */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
	u64 en;
	u64 raw;
	u64 bits;
	int i;
	int irq;
	struct irq_domain *cib_domain = data;
	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;

	en = cvmx_read_csr(host_data->en_reg);
	raw = cvmx_read_csr(host_data->raw_reg);

	bits = en & raw;

	for (i = 0; i < host_data->max_bits; i++) {
		if ((bits & 1ull << i) == 0)
			continue;
		irq = irq_find_mapping(cib_domain, i);
		if (!irq) {
			unsigned long flags;

			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
			       i, host_data->raw_reg);
			raw_spin_lock_irqsave(&host_data->lock, flags);
			en = cvmx_read_csr(host_data->en_reg);
			en &= ~(1ull << i);
			cvmx_write_csr(host_data->en_reg, en);
			cvmx_write_csr(host_data->raw_reg, 1ull << i);
			raw_spin_unlock_irqrestore(&host_data->lock, flags);
		} else {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *irq_data = irq_desc_get_irq_data(desc);

			/* If edge, acknowledge the bit we will be sending. */
			if (irqd_get_trigger_type(irq_data) &
			    IRQ_TYPE_EDGE_BOTH)
				cvmx_write_csr(host_data->raw_reg, 1ull << i);
			generic_handle_irq_desc(irq, desc);
		}
	}

	return IRQ_HANDLED;
}
static int __init octeon_irq_init_cib(struct device_node *ciu_node,
				      struct device_node *parent)
{
	const __be32 *addr;
	u32 val;
	struct octeon_irq_cib_host_data *host_data;
	int parent_irq;
	int r;
	struct irq_domain *cib_domain;

	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %s.\n",
		       ciu_node->name);
		return -EINVAL;
	}

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;
	raw_spin_lock_init(&host_data->lock);

	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(0) %s.\n", ciu_node->name);
		return -EINVAL;
	}
	host_data->raw_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(1) %s.\n", ciu_node->name);
		return -EINVAL;
	}
	host_data->en_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %s.\n",
		       ciu_node->name);
		return r;
	}
	host_data->max_bits = val;

	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
		pr_err("ERROR: Couldn't irq_domain_add_linear().\n");
		return -ENOMEM;
	}

	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */

	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
	if (r) {
		pr_err("request_irq cib failed %d\n", r);
		return r;
	}
	pr_info("CIB interrupt controller probed: %llx %d\n",
		host_data->raw_reg, host_data->max_bits);
	return 0;
}
static struct of_device_id ciu_types[] __initdata = {
	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
	{},
};
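/*
 * Note (added commentary): of_irq_init() walks the device tree and
 * calls the matching init function above for each interrupt-controller
 * node, so which CIU flavor gets used is decided entirely by the
 * "compatible" strings in this table.
 */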
void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
}
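/*
 * Note (added commentary): top-level MIPS interrupt dispatch.  Pending
 * lines are cause & status & ST0_IM, i.e. bits 8..15 for IP0..IP7.
 * For lines without a dedicated handler, fls() returns the 1-based
 * index of the highest pending bit, so e.g. IP0 (bit 8) gives
 * fls() == 9 and maps to MIPS_CPU_IRQ_BASE + 0.
 */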
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (cop0_cause & STATUSF_IP2)
			octeon_irq_ip2();
		else if (cop0_cause & STATUSF_IP3)
			octeon_irq_ip3();
		else if (cop0_cause & STATUSF_IP4)
			octeon_irq_ip4();
		else if (cop0_cause)
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}
#ifdef CONFIG_HOTPLUG_CPU

void octeon_fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */