/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>
unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static unsigned long __gic_base_addr;

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}

static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	else
		return gic_write64(reg, (u64)val);
}
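/*
 * gic_update_bits() - read-modify-write helper: the bits covered by @mask
 * are cleared in @reg and replaced with @val, using the width-appropriate
 * accessors above.
 */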
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
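/*
 * The counter/compare accessors below are only built when the MIPS GIC
 * clocksource driver (CONFIG_CLKSRC_MIPS_GIC) is enabled to use them.
 */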
#ifdef CONFIG_CLKSRC_MIPS_GIC
u64 notrace gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	/* Re-read the halves until the high word is stable across the read */
	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((u64) hi) << 32) + lo;
}
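/*
 * gic_get_count_width() - width of the GIC counter in bits. The COUNTBITS
 * field of GIC_SH_CONFIG encodes the width in 4 bit increments on top of
 * a 32 bit base.
 */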
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}
void notrace gic_write_compare(u64 cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}
}
void notrace gic_write_cpu_compare(u64 cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}
u64 gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((u64) hi) << 32) + lo;
}
void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif
unsigned gic_read_local_vp_id(void)
{
	unsigned long ident;

	ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
	return ident & GIC_VP_IDENT_VCNUM_MSK;
}
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return false;
	}
}
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}
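/*
 * gic_get_c0_compare_int() - returns the virq to use for the CP0 timer:
 * the GIC local timer interrupt when it is routable, otherwise the legacy
 * CP0 compare interrupt line.
 */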
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}
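/*
 * gic_handle_shared_int() - dispatch shared interrupts. The pending and
 * mask register blocks are read into bitmaps, then ANDed with each other
 * and with this CPU's pcpu_mask so that only enabled interrupts routed to
 * this CPU are handled.
 */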
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}
static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
};
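/*
 * __gic_irq_dispatch() is installed as a vectored exception handler in
 * EIC mode (chained == false), while gic_irq_dispatch() is the chained
 * handler used when the GIC is cascaded from a CPU interrupt pin
 * (chained == true); see __gic_init() below.
 */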
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize < 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
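/*
 * gic_irq_domain_map() - shared hwirqs start out with the level chip
 * (gic_set_type() may later switch them to the edge chip), while local
 * hwirqs get percpu handling, with timer/perfctr/FDC special-cased to the
 * all-VPEs chip.
 */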
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
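/*
 * gic_ipi_domain_alloc() - carve nr_irqs consecutive hwirqs out of the
 * reserved IPI range and map one to each CPU in the mask, unwinding the
 * reservation on any failure.
 */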
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}
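/*
 * The IPI domain only matches DOMAIN_BUS_IPI requests, so regular device
 * interrupt lookups keep resolving to the parent GIC domain.
 */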
int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}
static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							   GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");
	gic_irq_domain->name = "mips-gic-irq";

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS +
						  gic_shared_intrs,
						  node, &gic_ipi_domain_ops,
						  NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	gic_ipi_domain->name = "mips-gic-ipi";
	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
	gic_basic_init();
}
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
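/*
 * An illustrative (not board-specific) device tree node that this driver
 * binds against; the unit address and property values here are example
 * assumptions only:
 *
 *	gic: interrupt-controller@1bdc0000 {
 *		compatible = "mti,gic";
 *		reg = <0x1bdc0000 0x20000>;
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		mti,reserved-cpu-vectors = <7>;
 *	};
 */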
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);