/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#define pr_fmt(fmt) "irq-mips-gic: " fmt
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <dt-bindings/interrupt-controller/mips-gic.h>
#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)
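
/*
 * Within the IRQ domain, hwirqs [0, GIC_NUM_LOCAL_INTRS) are the per-VP(E)
 * local interrupts and hwirqs from GIC_SHARED_HWIRQ_BASE upwards are the
 * shared (externally routable) interrupts.
 */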
void __iomem *mips_gic_base;

DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
static struct gic_all_vpes_chip_data {
	u32	map;
	bool	mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}
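
/*
 * Generate an IPI by writing the shared interrupt's number to the WEDGE
 * register with GIC_WEDGE_RW set; the GIC then raises that interrupt on
 * whichever VP it has been routed to by gic_shared_irq_domain_map().
 */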
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
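
/*
 * Dispatch flow for shared interrupts: read the pending bitmap from the
 * GIC, AND it with this CPU's pcpu_masks entry (so only interrupts routed
 * to and unmasked on this CPU remain), then handle each set bit.
 */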
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(intr);
	gic_clear_pcpu_masks(intr);
}
static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_wedge(irq);
}
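
/*
 * Program a shared interrupt's polarity, trigger and dual-edge registers,
 * then switch the irq_chip and flow handler to the edge or level variant
 * matching the requested IRQ_TYPE_*.
 */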
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* Doesn't matter */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	change_gic_pol(irq, pol);
	change_gic_trig(irq, trig);
	change_gic_dual(irq, dual);

	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
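
/*
 * The GIC routes each shared interrupt to exactly one VP, so affinity is
 * honoured by re-routing to the first online CPU in the requested mask and
 * reporting that single CPU as the effective affinity.
 */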
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};
static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}
static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};
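
/*
 * The timer, perfcount and FDC local interrupts are used as ordinary
 * (non-percpu) IRQs by the rest of the MIPS kernel, so their mask state
 * and pin mapping must be kept identical across every VP. The "all VPEs"
 * chip below applies each operation to all online CPUs via the GIC's
 * "other" register alias, and its irq_cpu_online callback replays the
 * saved map/mask state on CPUs that come up later.
 */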
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned int intr;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);

	write_gic_vl_map(intr, cd->map);
	if (cd->mask)
		write_gic_vl_smask(BIT(intr));
}
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
	.irq_cpu_online		= gic_all_vpes_irq_cpu_online,
};
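
/*
 * __gic_irq_dispatch() is the vectored-interrupt (EIC) entry point
 * installed via set_vi_handler(); gic_irq_dispatch() is the chained
 * handler hung off a CPU interrupt line in non-EIC mode. Both poll
 * local interrupts before shared ones.
 */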
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
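
/*
 * gic_irq_domain_xlate() decodes the three-cell devicetree specifier
 * defined by the mti,gic binding. An illustrative consumer entry (values
 * here are hypothetical; the constants come from
 * dt-bindings/interrupt-controller/mips-gic.h and dt-bindings/interrupt-controller/irq.h):
 *
 *	interrupts = <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>;
 */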
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
		/* fall-through */
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_map(intr, map);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
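
/*
 * IPI allocation strategy: grab a contiguous run of hwirqs from the
 * ipi_available bitmap, then map one hwirq per CPU in the mask, routing
 * each to its CPU and installing the edge chip in both this domain and
 * its parent.
 */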
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}
int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}
static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
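
/*
 * Runs on each CPU as it comes online, via the
 * CPUHP_AP_IRQ_MIPS_GIC_STARTING hotplug state registered at the end of
 * gic_of_init().
 */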
static int gic_cpu_startup(unsigned int cpu)
{
	/* Enable or disable EIC */
	change_gic_vl_ctl(GIC_VX_CTL_EIC,
			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

	/* Clear all local IRQ masks (ie. disable all local interrupts) */
	write_gic_vl_rmask(~0);

	/* Invoke irq_cpu_online callbacks to enable desired interrupts */
	irq_cpu_online();

	return 0;
}
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;
	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available\n");
		return -ENODEV;
	}
	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
			pr_warn("Using inherited base address %pa\n",
				&gic_base);
		} else {
			pr_err("Failed to get memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}
	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);
	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;
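	/*
	 * The NUMINTERRUPTS field decoded above encodes the shared interrupt
	 * count in units of 8, minus one: e.g. a field value of 0x1f yields
	 * (0x1f + 1) * 8 = 256 shared interrupts.
	 */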
	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}
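	/*
	 * Note: in non-EIC mode cpu_vec numbers a CP0 Cause IP bit. IP0/IP1
	 * are the software interrupts and hardware pins begin at IP2, hence
	 * the GIC_CPU_PIN_OFFSET of 2 when converting back to a GIC CPU pin.
	 */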
	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add IRQ domain");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/*
		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
		 * meeting the requirements of arch/mips SMP.
		 */
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}
	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);