/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2014 Cavium, Inc.
 */
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
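/*
 * Each CPU keeps a software mirror of its CIU enable registers so the
 * SUM-based dispatch handlers can mask pending bits without an extra
 * CSR read.  The per-CPU spinlock serializes the read-modify-write of
 * a mirror together with the write of its value back to the hardware.
 */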
struct octeon_irq_ciu_domain_data {
	int num_sum;	/* number of sum registers (2 or 3). */
};

static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
			u8 gpio_line;
		};
	};
	int current_cpu;	/* Next CPU expected to take this irq */
};
struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);

	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;

	return 0;
}
static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}
static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	return irq_domain_associate(domain, irq, line << 6 | bit);
}
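/*
 * CIU hwirq numbers encode the sum line in the upper bits and the bit
 * position within that line in the low 6 bits: hwirq = (line << 6) | bit.
 * octeon_irq_force_ciu_mapping() pre-associates a fixed virq with such a
 * hwirq so the legacy OCTEON_IRQ_* numbers keep working.
 */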
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}
static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}
static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}
static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}
static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}
static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}
static struct irq_chip octeon_irq_chip_core = {
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}
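/*
 * Pick the CPU that should receive the next instance of this irq.  When
 * the affinity mask contains more than one online CPU, delivery is
 * rotated round-robin through the mask, starting from cd->current_cpu.
 */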
static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
	int cpu;
	struct cpumask *mask = irq_data_get_affinity_mask(data);
	int weight = cpumask_weight(mask);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, mask);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(mask);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}
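/*
 * The non-W1S/W1C enable/disable paths below follow a common pattern:
 * take the target CPU's mirror lock, update the mirror bit, make the
 * update visible with wmb(), then write the whole mirror back to the
 * EN0/EN1 CSR of that CPU's core.
 */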
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
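/*
 * The *_v2 variants below rely on the EN*_W1{S,C} register aliases:
 * writing a 1 to a W1S address sets only that enable bit, and writing a
 * 1 to the W1C address clears it, so no read-modify-write (and hence no
 * spinlock) is needed.  set_bit()/clear_bit() keep the mirrors atomic.
 */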
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}
/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}
static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}
static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}
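/*
 * The sum2 variants above manage the third summary register (SUM2/EN2),
 * delivered on the IP4 interrupt line.  EN2 also has W1S/W1C aliases,
 * and no software mirror is kept for it; the IP4 handler reads the EN2
 * CSR directly instead.
 */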
/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}
/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}
/*
 * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}
/*
 * Enable the irq on the all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}
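/*
 * GPIO trigger configuration: int_type selects edge (1) vs level (0)
 * detection, and rx_xor inverts the input so that falling-edge and
 * active-low triggers can share the same detection logic.
 */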
static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 nS glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}
static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}
static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}
static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}
static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}
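/*
 * CPU hotplug: when a CPU goes offline, any irq pointed at it must be
 * retargeted.  Drop the dying CPU from a multi-CPU affinity set, or
 * fall back to the lowest numbered online CPU otherwise.
 */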
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;
	struct cpumask *mask = irq_data_get_affinity_mask(data);

	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}
static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}
static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		int index = octeon_coreid_for_cpu(cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
		}
	}
	return 0;
}
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}
static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55: /* Timers */
		case 58:	/* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}
struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};
static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (irq_domain_get_of_node(d) != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name, trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}
static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu >= dd->num_sum || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}
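/*
 * An illustrative (hypothetical, not taken from this file) device-tree
 * consumer of the two-cell CIU specifier decoded above:
 *
 *	interrupt-parent = <&ciu>;
 *	interrupts = <0 63>;	// line 0, bit 63 -> hwirq (0 << 6) | 63
 *
 * The first cell is the sum line, the second the bit within that line.
 */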
static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;
static bool octeon_irq_virq_in_range(unsigned int virq)
{
	/* We cannot let it overflow the mapping array. */
	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
		return true;

	WARN_ONCE(true, "virq out of range %u.\n", virq);
	return false;
}
static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/* Don't map irq if it is reserved for GPIO. */
	if (line == 0 && bit >= 16 && bit < 32)
		return 0;

	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (line == 2) {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2,
				handle_level_irq);
	} else {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip,
				handle_level_irq);
	}
	return rv;
}
static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
		octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	/*
	 * Default to handle_level_irq. If the DT contains a different
	 * trigger type, it will call the irq_set_type callback and
	 * the handler gets updated.
	 */
	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				       octeon_irq_gpio_chip, handle_level_irq);
	return r;
}
static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_gpio_xlat,
};
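/*
 * Low level dispatch for the CIU summary lines.  Each handler reads the
 * per-core SUM CSR, masks it with this CPU's enable mirror, and hands
 * the highest numbered pending bit to do_IRQ() via the ciu-to-irq table.
 */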
static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
static void octeon_irq_ip4_ciu(void)
{
	int coreid = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

	ciu_sum &= ciu_en;
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[2][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}
static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}
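/*
 * Illustrative use (hypothetical caller, not in this file): code that
 * owns the IP4 line installs its dispatch function once:
 *
 *	octeon_irq_set_ip4_handler(my_ip4_dispatch);
 *
 * after which IP4 is unmasked on every online CPU.
 */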
static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively. Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}
static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
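/*
 * Probe-time initialization for the original CIU: set up the per-CPU
 * state, pick the register-compatible irq_chip set (W1S/W1C capable
 * models get the lockless v2 chips), create the irq_domain, install
 * the fixed legacy mappings, and finally unmask the CIU lines in CP0.
 */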
static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
		&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 = octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
	if (r)
		goto err;

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	return r;
}
static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (gpiod) {
		/* gpio domain host_data is the base hwirq number. */
		gpiod->base_hwirq = base_hwirq;
		irq_domain_add_linear(
			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
	} else {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
		cvmx_write_csr(en_addr, mask);
	}
}
static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}
static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);
}
static struct irq_chip octeon_irq_chip_ciu2 = {
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_edge = {
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}
static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:		/* IPD_DRP */
		case 8 ... 11:	/* Timers */
		case 48:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53: /* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}
static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/*
	 * Don't map irq if it is reserved for GPIO.
	 * (Line 7 are the GPIO lines.)
	 */
	if (line == 7)
		return 0;

	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2_edge,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);

	return 0;
}
static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu2_xlat,
};
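/*
 * CIU2 dispatch is two-level: the SUM register identifies which source
 * line is pending, then that line's SRC register identifies the bit,
 * which indexes the shared ciu-to-irq table.
 */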
static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an errata that accessing the ACK registers
		can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}
static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an errata that accessing the ACK registers
		can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}
static int __init octeon_irq_init_ciu2(
	struct device_node *ciu_node, struct device_node *parent)
{
	int i, r;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
	irq_set_default_host(ciu_domain);

	/* CIU2 */
	for (i = 0; i < 64; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 32; i++) {
		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
		if (r)
			goto err;
	}

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
	return 0;
err:
	return r;
}
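/*
 * CIB (Common Interrupt Block): a small secondary controller whose RAW
 * and EN registers sit behind a single parent CIU irq.  Enables are a
 * plain read-modify-write of the shared EN register, so a spinlock in
 * the host data protects them.
 */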
struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};
static void octeon_irq_cib_enable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en |= 1ull << cd->bit;
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}
static void octeon_irq_cib_disable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en &= ~(1ull << cd->bit);
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}
static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	return IRQ_SET_MASK_OK;
}
static struct irq_chip octeon_irq_chip_cib = {
	.irq_enable = octeon_irq_cib_enable,
	.irq_disable = octeon_irq_cib_disable,
	.irq_mask = octeon_irq_cib_disable,
	.irq_unmask = octeon_irq_cib_enable,
	.irq_set_type = octeon_irq_cib_set_type,
};
static int octeon_irq_cib_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int type = 0;

	if (intsize == 2)
		type = intspec[1];

	switch (type) {
	case 0: /* unofficial value, but we might as well let it work. */
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = intspec[0];

	return 0;
}
static int octeon_irq_cib_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_cib_host_data *host_data = d->host_data;
	struct octeon_irq_cib_chip_data *cd;

	if (hw >= host_data->max_bits) {
		pr_err("ERROR: %s mapping %u is too big!\n",
		       irq_domain_get_of_node(d)->name, (unsigned)hw);
		return -EINVAL;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	cd->host_data = host_data;
	cd->bit = hw;

	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
	return 0;
}
static struct irq_domain_ops octeon_irq_domain_cib_ops = {
	.map = octeon_irq_cib_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_cib_xlat,
};
/* Chain to real handler. */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
	u64 en;
	u64 raw;
	u64 bits;
	int i;
	int irq;
	struct irq_domain *cib_domain = data;
	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;

	en = cvmx_read_csr(host_data->en_reg);
	raw = cvmx_read_csr(host_data->raw_reg);

	bits = en & raw;

	for (i = 0; i < host_data->max_bits; i++) {
		if ((bits & 1ull << i) == 0)
			continue;
		irq = irq_find_mapping(cib_domain, i);
		if (!irq) {
			unsigned long flags;

			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
				i, host_data->raw_reg);
			raw_spin_lock_irqsave(&host_data->lock, flags);
			en = cvmx_read_csr(host_data->en_reg);
			en &= ~(1ull << i);
			cvmx_write_csr(host_data->en_reg, en);
			cvmx_write_csr(host_data->raw_reg, 1ull << i);
			raw_spin_unlock_irqrestore(&host_data->lock, flags);
		} else {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *irq_data = irq_desc_get_irq_data(desc);
			/* If edge, acknowledge the bit we will be sending. */
			if (irqd_get_trigger_type(irq_data) &
				IRQ_TYPE_EDGE_BOTH)
				cvmx_write_csr(host_data->raw_reg, 1ull << i);
			generic_handle_irq_desc(desc);
		}
	}

	return IRQ_HANDLED;
}
static int __init octeon_irq_init_cib(struct device_node *ciu_node,
				      struct device_node *parent)
{
	const __be32 *addr;
	u32 val;
	struct octeon_irq_cib_host_data *host_data;
	int parent_irq;
	int r;
	struct irq_domain *cib_domain;

	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
			ciu_node->name);
		return -EINVAL;
	}

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;
	raw_spin_lock_init(&host_data->lock);

	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
		return -EINVAL;
	}
	host_data->raw_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
		return -EINVAL;
	}
	host_data->en_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
			ciu_node->name);
		return r;
	}
	host_data->max_bits = val;

	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
		pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
		return -ENOMEM;
	}

	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */

	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
	if (r) {
		pr_err("request_irq cib failed %d\n", r);
		return r;
	}
	pr_info("CIB interrupt controller probed: %llx %d\n",
		host_data->raw_reg, host_data->max_bits);
	return 0;
}
static struct of_device_id ciu_types[] __initdata = {
	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
	{},
};
void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
}
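/*
 * Main interrupt dispatch: service pending lines in priority order,
 * IP2 first, then IP3, IP4 and the remaining CP0 software/timer lines,
 * until no enabled cause bits remain.
 */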
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (cop0_cause & STATUSF_IP2)
			octeon_irq_ip2();
		else if (cop0_cause & STATUSF_IP3)
			octeon_irq_ip3();
		else if (cop0_cause & STATUSF_IP4)
			octeon_irq_ip4();
		else if (cop0_cause)
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}
#ifdef CONFIG_HOTPLUG_CPU

void octeon_fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */