/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>
unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

struct gic_irq_spec {
	enum {
		GIC_DEVICE,
		GIC_IPI
	} type;

	union {
		struct cpumask *ipimask;
		unsigned int hwirq;
	};
};

static unsigned long __gic_base_addr;

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_dev_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);
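
/*
 * Register accessors. The GIC may be accessed through either 32-bit or
 * 64-bit MMIO depending on the Coherence Manager; gic_read()/gic_write()
 * pick the access width at run time via mips_cm_is64 so that callers can
 * stay width-agnostic.
 */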
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}

static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	return gic_read64(reg);
}

static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	return gic_write64(reg, (u64)val);
}
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}
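
/*
 * The shared-interrupt configuration registers are bitmaps spread across
 * consecutive words: GIC_INTR_OFS() selects the word that holds a given
 * interrupt's bit and GIC_INTR_BIT() the bit position within that word.
 */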
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
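
/*
 * Counter accessors for the GIC clocksource. When only 32-bit accesses are
 * available, the 64-bit counter is read as two halves; the high word is
 * read again afterwards and the sequence retried if it changed, so that a
 * carry between the two reads cannot yield a torn value.
 */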
#ifdef CONFIG_CLKSRC_MIPS_GIC
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}
void gic_write_compare(cycle_t cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}
}
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}
void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif
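
/*
 * Local interrupts (watchdog, timer, perf counter, software interrupts,
 * FDC) are per-VPE. Outside of EIC mode, whether each source may be
 * re-routed through the GIC is reported by the per-VPE control register.
 */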
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}
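
/*
 * Lookup helpers for arch code: each returns a GIC mapping when the source
 * is routable through the GIC, and falls back to the corresponding CP0
 * interrupt line otherwise.
 */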
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}
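
/*
 * Shared interrupt dispatch: read the pending and mask registers into
 * bitmaps, AND them with the current CPU's affinity bitmap (pcpu_mask) and
 * handle every interrupt left set. "chained" selects between invocation as
 * a chained handler (generic_handle_irq) and direct dispatch (do_IRQ).
 */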
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}
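
/*
 * gic_set_type() programs polarity, trigger and dual-edge configuration
 * for a shared interrupt to match the requested IRQ_TYPE_*, then installs
 * the corresponding edge or level irq_chip and flow handler.
 */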
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
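
/*
 * Two irq_chips cover shared interrupts: edge-triggered interrupts must be
 * acknowledged by clearing their bit in the WEDGE register (gic_ack_irq),
 * while level-triggered ones need no ack. gic_set_type() above switches an
 * IRQ between the two chips as its trigger type changes.
 */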
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	while (intr != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(&pending, intr, 1);
		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	}
}
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};
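
/*
 * Entry points for interrupt dispatch: __gic_irq_dispatch() is installed
 * as a vectored-interrupt handler in EIC mode, while gic_irq_dispatch()
 * runs as a chained handler off a CPU interrupt line. Both poll local then
 * shared interrupts.
 */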
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, vpe);
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);
	return gic_shared_irq_domain_map(d, virq, hw, 0);
}
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct gic_irq_spec *spec = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	if (spec->type == GIC_DEVICE) {
		/* verify that it doesn't conflict with an IPI irq */
		if (test_bit(spec->hwirq, ipi_resrv))
			return -EBUSY;
	} else {
		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
		if (base_hwirq == gic_shared_intrs)
			return -ENOMEM;

		/* check that we have enough space */
		for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
			if (!test_bit(i, ipi_resrv))
				return -EBUSY;
		}
		bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);

		/* map the hwirq for each cpu consecutively */
		i = 0;
		for_each_cpu(cpu, spec->ipimask) {
			hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

			ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
							    &gic_edge_irq_controller,
							    NULL);
			if (ret)
				goto error;

			ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
			if (ret)
				goto error;

			i++;
		}

		/*
		 * tell the parent about the base hwirq we allocated so it can
		 * set its own domain data
		 */
		spec->hwirq = base_hwirq;
	}

	return 0;
error:
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
	return ret;
}
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
}

int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	/* this domain shouldn't be accessed directly */
	return 0;
}
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.match = gic_irq_domain_match,
};
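
/*
 * The device domain translates three-cell devicetree specifiers of the
 * form <type number flags>. For example (illustrative only), a device
 * requesting shared interrupt 24 as active-high level would use:
 *
 *	interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
 */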
static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	struct gic_irq_spec spec = {
		.type = GIC_DEVICE,
		.hwirq = fwspec->param[1],
	};
	int i, ret;
	bool is_shared = fwspec->param[0] == GIC_SHARED;

	if (is_shared) {
		ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
		if (ret)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_hw_number_t hwirq;

		if (is_shared)
			hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
		else
			hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (ret)
			return ret;
	}

	return 0;
}

void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	/* no real allocation is done for dev irqs, so no need to free anything */
	return;
}

static struct irq_domain_ops gic_dev_domain_ops = {
	.xlate = gic_dev_domain_xlate,
	.alloc = gic_dev_domain_alloc,
	.free = gic_dev_domain_free,
};
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	struct gic_irq_spec spec = {
		.type = GIC_IPI,
		.ipimask = ipimask
	};
	int ret, i;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	/* the parent should have set spec.hwirq to the base_hwirq it allocated */
	for (i = 0; i < nr_irqs; i++) {
		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
	}

	return 0;
error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}

void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
}
int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return to_of_node(d->fwnode) == node && is_ipi;
	default:
		return 0;
	}
}

static struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
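
/*
 * Domain setup: gic_irq_domain is the root domain covering all local and
 * shared hwirqs. gic_dev_domain (devicetree clients) and gic_ipi_domain
 * (IPI allocation) are stacked on top of it as hierarchy domains and defer
 * the actual hardware programming to the root domain.
 */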
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							   GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_dev_domain_ops, NULL);
	if (!gic_dev_domain)
		panic("Failed to add GIC DEV domain");

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	gic_basic_init();
}
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);