/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>
unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

struct gic_irq_spec {
	enum {
		GIC_DEVICE,
		GIC_IPI
	} type;

	union {
		struct cpumask *ipimask;
		unsigned int hwirq;
	};
};
static unsigned long __gic_base_addr;

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_dev_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}
static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}
static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}
static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}
static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}
static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	else
		return gic_write64(reg, (u64)val);
}
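/*
 * Note: gic_read()/gic_write() dispatch to the 32- or 64-bit MMIO accessors
 * above depending on mips_cm_is64, i.e. the register width of the system's
 * Coherence Manager. An illustrative read-modify-write sketch (REG_OFS is a
 * hypothetical register offset, not one defined by this driver):
 *
 *	unsigned long val = gic_read(REG_OFS);
 *	gic_write(REG_OFS, val | BIT(0));
 */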
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
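/*
 * Taken together, the helpers above are what it takes to route a shared
 * interrupt: configure polarity and trigger type, map the interrupt to a
 * CPU pin and to a VP(E), then unmask it. A sketch of the sequence used
 * later by gic_shared_irq_domain_map() and gic_basic_init():
 *
 *	gic_set_polarity(intr, GIC_POL_POS);
 *	gic_set_trigger(intr, GIC_TRIG_LEVEL);
 *	gic_map_to_pin(intr, gic_cpu_pin);
 *	gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
 *	gic_set_mask(intr);
 */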
#ifdef CONFIG_CLKSRC_MIPS_GIC
u64 gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((u64) hi) << 32) + lo;
}
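/*
 * With 32-bit accesses the 64-bit counter is read as two halves; the high
 * word is re-read (hi2) and the sequence retried if it changed, so a
 * low-word rollover between the two reads cannot produce a torn value.
 */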
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}
void gic_write_compare(u64 cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}
}
void gic_write_cpu_compare(u64 cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}
u64 gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((u64) hi) << 32) + lo;
}
void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}
void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif
unsigned gic_read_local_vp_id(void)
{
	unsigned long ident;

	ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
	return ident & GIC_VP_IDENT_VCNUM_MSK;
}
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
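/*
 * Only interrupts that are simultaneously pending in hardware, unmasked,
 * and routed to the current CPU are dispatched: the two bitmap_and() calls
 * above intersect the pending bits with the mask bits and with this CPU's
 * pcpu_mask before the result is walked.
 */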
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption: cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
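/*
 * The GIC routes each shared interrupt to exactly one VP(E), hence the
 * single-CPU assumption above: only the first online CPU of the requested
 * mask is used. The function copies the mask into the irq_data itself and
 * returns IRQ_SET_MASK_OK_NOCOPY so the core does not copy it again.
 */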
static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};
static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}
static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
};
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
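/*
 * After gic_basic_init() every shared interrupt defaults to level
 * triggered, active high and masked, and every routable local interrupt is
 * masked on each VP(E); users must explicitly set a trigger type and
 * unmask whatever they need.
 */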
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
			      unsigned int hwirq)
{
	struct irq_chip *chip;
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
	} else {
		switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
		case GIC_LOCAL_INT_TIMER:
		case GIC_LOCAL_INT_PERFCTR:
		case GIC_LOCAL_INT_FDC:
			/*
			 * HACK: These are all really percpu interrupts, but
			 * the rest of the MIPS kernel code does not use the
			 * percpu IRQ API for them.
			 */
			chip = &gic_all_vpes_local_irq_controller;
			irq_set_handler(virq, handle_percpu_irq);
			break;

		default:
			chip = &gic_local_irq_controller;
			irq_set_handler(virq, handle_percpu_devid_irq);
			irq_set_percpu_devid(virq);
			break;
		}

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    chip, NULL);
	}

	return err;
}
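/*
 * Chip selection above in short: shared hwirqs get the level chip (the
 * flow may later switch to the edge chip via gic_set_type()); the timer,
 * perfcounter and FDC local interrupts get the all-VP(E)s chip with
 * handle_percpu_irq because legacy MIPS code does not use the percpu
 * devid API for them; all other local interrupts get the plain local chip
 * as percpu-devid interrupts.
 */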
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct gic_irq_spec *spec = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	if (spec->type == GIC_DEVICE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
		    test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
			return -EBUSY;

		return gic_setup_dev_chip(d, virq, spec->hwirq);
	} else {
		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
		if (base_hwirq == gic_shared_intrs)
			return -ENOMEM;

		/* check that we have enough space */
		for (i = base_hwirq; i < nr_irqs; i++) {
			if (!test_bit(i, ipi_resrv))
				return -EBUSY;
		}
		bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);

		/* map the hwirq for each cpu consecutively */
		i = 0;
		for_each_cpu(cpu, spec->ipimask) {
			hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

			ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
							    &gic_level_irq_controller,
							    NULL);
			if (ret)
				goto error;

			irq_set_handler(virq + i, handle_level_irq);

			ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
			if (ret)
				goto error;

			i++;
		}

		/*
		 * tell the parent about the base hwirq we allocated so it can
		 * set its own domain data
		 */
		spec->hwirq = base_hwirq;
	}

	return 0;
error:
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
	return ret;
}
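/*
 * IPI bookkeeping: ipi_resrv marks the shared hwirqs reserved for IPIs.
 * A successful allocation clears nr_irqs bits starting at base_hwirq and
 * maps one hwirq per CPU of the requested mask; the error path above sets
 * the bits again so the vectors return to the reserved pool.
 */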
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
}
int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	/* this domain shouldn't be accessed directly */
	return 0;
}
static const struct irq_domain_ops gic_irq_domain_ops = {
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.match = gic_irq_domain_match,
};
static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	struct gic_irq_spec spec = {
		.type = GIC_DEVICE,
	};
	int i, ret;

	if (fwspec->param[0] == GIC_SHARED)
		spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
		if (ret)
			goto error;
	}

	return 0;

error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}
void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	/* no real allocation is done for dev irqs, so no need to free anything */
}
static void gic_dev_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
		gic_local_irq_domain_map(domain, d->irq, d->hwirq);
	else
		gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
}
static struct irq_domain_ops gic_dev_domain_ops = {
	.xlate = gic_dev_domain_xlate,
	.alloc = gic_dev_domain_alloc,
	.free = gic_dev_domain_free,
	.activate = gic_dev_domain_activate,
};
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	struct gic_irq_spec spec = {
		.type = GIC_IPI,
		.ipimask = ipimask
	};
	int ret, i;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	/* the parent should have set spec.hwirq to the base_hwirq it allocated */
	for (i = 0; i < nr_irqs; i++) {
		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
	}

	return 0;
error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
}
int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}
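/*
 * Matching on DOMAIN_BUS_IPI ensures IPIs can only be allocated through
 * this domain, while the root gic_irq_domain's match deliberately fails
 * so lookups always go through the dev or ipi child domains.
 */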
static struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPUs' local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							   GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");
	gic_irq_domain->name = "mips-gic-irq";

	gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_dev_domain_ops, NULL);
	if (!gic_dev_domain)
		panic("Failed to add GIC DEV domain");
	gic_dev_domain->name = "mips-gic-dev";

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	gic_ipi_domain->name = "mips-gic-ipi";
	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	gic_basic_init();
}
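/*
 * Resulting domain layout: gic_irq_domain is the root and owns the
 * hardware; gic_dev_domain (device interrupts described in DT) and
 * gic_ipi_domain (dynamically allocated IPIs) are stacked on top of it as
 * hierarchy domains and forward their allocations to the root.
 */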
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);