/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "irq-mips-gic: " fmt

#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* When an interrupt is mapped to pin X, the GIC will generate vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)
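
/*
 * For illustration: local interrupts occupy hwirqs [0, GIC_NUM_LOCAL_INTRS)
 * (currently 7 of them) and shared interrupts follow, so shared interrupt 3
 * becomes hwirq GIC_SHARED_TO_HWIRQ(3) == 10, and GIC_HWIRQ_TO_SHARED(10)
 * recovers 3.
 */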

void __iomem *mips_gic_base;

DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

/*
 * Per-interrupt state for local interrupts handled identically on all VPEs:
 * the routing (map) and mask state are cached here so they can be replayed
 * on each CPU as it comes online.
 */
static struct gic_all_vpes_chip_data {
	u32	map;
	bool	mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}

static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}
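
/*
 * Raise an IPI: writing a shared hwirq number to the WEDGE register with
 * GIC_WEDGE_RW set asserts that shared interrupt (writing without the R/W
 * bit clears it, which is what gic_ack_irq below relies on). E.g. an IPI
 * bound to shared interrupt 60 is sent with write_gic_wedge(GIC_WEDGE_RW | 60).
 */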
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
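
/*
 * Handle pending shared interrupts for this CPU. The pending bitmap is
 * ANDed with this CPU's pcpu_mask so each interrupt is serviced only by
 * the CPU it is routed to. "chained" distinguishes being invoked from a
 * chained parent handler (generic_handle_irq) from being invoked directly
 * as an exception vector in EIC mode (do_IRQ).
 */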
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(intr);
	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_wedge(irq);
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* Doesn't matter */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	change_gic_pol(irq, pol);
	change_gic_trig(irq, trig);
	change_gic_dual(irq, dual);

	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};
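
/*
 * The *_all_vpes variants below apply mask/unmask on every online VPE by
 * programming each VPE's registers through the GIC "other" (VL -> VO)
 * register view rather than only the local VPE. They back local interrupts
 * (timer, perf counter, FDC) that the rest of the MIPS code treats as a
 * single IRQ even though they are really per-CPU.
 */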
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned int intr;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);

	/* Replay the cached routing and mask state on the new CPU */
	write_gic_vl_map(intr, cd->map);
	if (cd->mask)
		write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
	.irq_cpu_online		= gic_all_vpes_irq_cpu_online,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	gic_clear_pcpu_masks(intr);
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
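
/*
 * For illustration, a device-tree consumer of this controller uses a
 * three-cell specifier of the form <GIC_SHARED|GIC_LOCAL, irq, type>,
 * e.g. (hypothetical node):
 *
 *	interrupt-parent = <&gic>;
 *	interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
 *
 * which the xlate above turns into hwirq GIC_SHARED_TO_HWIRQ(24) with a
 * level-high trigger.
 */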

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		/* CONFIG_MIPS_CMP workaround (see gic_of_init) */
		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
		/* fall-through */
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_map(intr, map);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
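
/*
 * Allocate a block of consecutive shared interrupts from the reserved IPI
 * pool and route one to each CPU in ipimask. For example, with four CPUs
 * and base_hwirq 60, CPUs 0-3 receive shared interrupts 60-63 respectively,
 * each mapped and targeted at its own CPU.
 */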
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
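
/*
 * CPU hotplug "starting" callback, run on each CPU as it comes online: it
 * programs the EIC mode bit to match the core's configuration and masks
 * every local interrupt, after which irq_cpu_online() re-enables whatever
 * the owning irq_chips want enabled.
 */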
static int gic_cpu_startup(unsigned int cpu)
{
	/* Enable or disable EIC */
	change_gic_vl_ctl(GIC_VX_CTL_EIC,
			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

	/* Clear all local IRQ masks (ie. disable all local interrupts) */
	write_gic_vl_rmask(~0);

	/* Invoke irq_cpu_online callbacks to enable desired interrupts */
	irq_cpu_online();

	return 0;
}

static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
			pr_warn("Using inherited base address %pa\n",
				&gic_base);
		} else {
			pr_err("Failed to get memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);

	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;
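	/*
	 * GIC_CONFIG_NUMINTERRUPTS above encodes the shared interrupt count
	 * in units of 8, minus one: e.g. a field value of 31 decodes to
	 * (31 + 1) * 8 = 256 shared interrupts, the maximum this driver
	 * supports (GIC_MAX_INTRS).
	 */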

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPUs' local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add IRQ domain\n");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS +
						  gic_shared_intrs,
						  node, &gic_ipi_domain_ops,
						  NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add IPI domain\n");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/*
		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
		 * meeting the requirements of arch/mips SMP.
		 */
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}
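
	/*
	 * With the default reservation above, a system with 4 possible VPs
	 * and 256 shared interrupts sets aside shared interrupts 248-255
	 * (2 per VP) for IPIs.
	 */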

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults: level triggered, active high, masked. */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}

	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}

IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);