#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/hardirq.h>

#include <asm/io.h>
#include <asm/gic.h>
#include <asm/gcmpregs.h>
#include <asm/mips-boards/maltaint.h>
#include <asm-generic/bitops/find.h>

static unsigned long _gic_base;
static unsigned int _irqbase, _mapsize, numvpes, numintrs;
static struct gic_intr_map *_intrmap;

static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];

/* Can the GIC handle back-to-back writes to the wedge register? */
#define gic_wedgeb2bok 0

#if gic_wedgeb2bok == 0
static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
#endif
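
/*
 * Send an inter-processor interrupt: write the interrupt number with
 * the "set" bit (0x80000000) to the shared WEDGE register.  When the
 * GIC cannot take back-to-back wedge writes, the access is serialised
 * under gic_wedgeb2b_lock and flushed with a read of GIC_SH_CONFIG.
 */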
void gic_send_ipi(unsigned int intr)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif
	pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
		 read_c0_status());
	if (!gic_wedgeb2bok)
		spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
	GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
	if (!gic_wedgeb2bok) {
		/* Flush the wedge write before dropping the lock */
		(void) GIC_REG(SHARED, GIC_SH_CONFIG);
		spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
	}
}

/* This is Malta specific and needs to be exported */
static void vpe_local_setup(unsigned int numvpes)
{
	int i;
	unsigned long timer_interrupt = 5, perf_interrupt = 5;
	unsigned int vpe_ctl;

	/*
	 * Setup the default timer and performance counter interrupts
	 * for all VPEs.
	 */
	for (i = 0; i < numvpes; i++) {
		/* Select VPE i through the local OTHER_ADDR window */
		GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		/* Are Interrupts locally routable? */
		GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
		if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				 GIC_MAP_TO_PIN_MSK | timer_interrupt);

		if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				 GIC_MAP_TO_PIN_MSK | perf_interrupt);
	}
}
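
/*
 * Decode the lowest-numbered pending interrupt for the calling CPU:
 * latch the shared pending and mask registers into per-cpu bitmaps,
 * AND them together with this CPU's software affinity mask, and
 * return the first surviving bit (GIC_NUM_INTRS when none is set).
 */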
unsigned int gic_get_int(void)
{
	unsigned int i;
	unsigned long *pending, *intrmask, *pcpu_mask;
	unsigned long *pending_abs, *intrmask_abs;

	/* Get per-cpu bitmaps */
	pending = pending_regs[smp_processor_id()].pending;
	intrmask = intrmask_regs[smp_processor_id()].intrmask;
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
							 GIC_SH_PEND_31_0_OFS);
	intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
							  GIC_SH_MASK_31_0_OFS);

	for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
		GICREAD(*pending_abs, pending[i]);
		GICREAD(*intrmask_abs, intrmask[i]);
		pending_abs++;
		intrmask_abs++;
	}

	bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
	bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);

	i = find_first_bit(pending, GIC_NUM_INTRS);

	pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);

	return i;
}
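
/*
 * struct irq_chip callbacks.  Masking and unmasking program the GIC's
 * shared interrupt mask registers; edge-triggered sources are also
 * acknowledged through the WEDGE register in gic_irq_ack().  The
 * generic IRQ numbers are offset by _irqbase before touching the GIC.
 */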
static unsigned int gic_irq_startup(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	GIC_SET_INTR_MASK(irq, 1);
	return 0;
}

static void gic_irq_ack(unsigned int irq)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	GIC_CLR_INTR_MASK(irq, 1);

	if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
		if (!gic_wedgeb2bok)
			spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
		GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
		if (!gic_wedgeb2bok) {
			/* Flush the wedge write before dropping the lock */
			(void) GIC_REG(SHARED, GIC_SH_CONFIG);
			spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
		}
	}
}

static void gic_mask_irq(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	GIC_CLR_INTR_MASK(irq, 1);
}

static void gic_unmask_irq(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	GIC_SET_INTR_MASK(irq, 1);
}

#ifdef CONFIG_SMP

static DEFINE_SPINLOCK(gic_lock);
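
/*
 * Re-route an interrupt to the first online CPU in the requested mask:
 * update the shared MAP_TO_VPE routing, the cached _intrmap entry and
 * the per-cpu software masks, all under gic_lock.
 */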
static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i;

	pr_debug("%s called\n", __func__);
	irq -= _irqbase;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpus_empty(tmp))
		return -1;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

	/* FIXME: assumption that _intrmap is ordered and has no holes */

	/* Update the intr_map */
	_intrmap[irq].cpunum = first_cpu(tmp);

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

	cpumask_copy(irq_desc[irq].affinity, cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
#endif /* CONFIG_SMP */

static struct irq_chip gic_irq_controller = {
	.name		= "MIPS GIC",
	.startup	= gic_irq_startup,
	.ack		= gic_irq_ack,
	.mask		= gic_mask_irq,
	.mask_ack	= gic_mask_irq,
	.unmask		= gic_unmask_irq,
	.eoi		= gic_unmask_irq,
#ifdef CONFIG_SMP
	.set_affinity	= gic_set_affinity,
#endif
};
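
/*
 * Program one interrupt source: route it to a pin (or, for an NMI, to
 * all CPUs), select the target VPE, set polarity and trigger type, and
 * leave it masked until gic_irq_startup() enables it.
 */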
static void __init setup_intr(unsigned int intr, unsigned int cpu,
	unsigned int pin, unsigned int polarity, unsigned int trigtype)
{
	/* Setup Intr to Pin mapping */
	if (pin & GIC_MAP_TO_NMI_MSK) {
		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
		/* FIXME: hack to route NMI to all cpu's */
		for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
			GICWRITE(GIC_REG_ADDR(SHARED,
					GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
				 0xffffffff);
		}
	} else {
		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
			 GIC_MAP_TO_PIN_MSK | pin);
		/* Setup Intr to CPU mapping */
		GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
	}

	/* Setup Intr Polarity */
	GIC_SET_POLARITY(intr, polarity);

	/* Setup Intr Trigger Type */
	GIC_SET_TRIGGER(intr, trigtype);

	/* Init Intr Masks */
	GIC_SET_INTR_MASK(intr, 0);
}

static void __init gic_basic_init(void)
{
	unsigned int i, cpu;

	/* Setup defaults: positive polarity, level-triggered, masked */
	for (i = 0; i < GIC_NUM_INTRS; i++) {
		GIC_SET_POLARITY(i, GIC_POL_POS);
		GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
		GIC_SET_INTR_MASK(i, 0);
	}

	/* Setup specifics */
	for (i = 0; i < _mapsize; i++) {
		cpu = _intrmap[i].cpunum;

		/* Skip empty (non-IPI, unrouted) entries */
		if (cpu == 0 && i != 0 && _intrmap[i].intrnum == 0 &&
		    _intrmap[i].ipiflag == 0)
			continue;

		setup_intr(_intrmap[i].intrnum,
			   _intrmap[i].cpunum,
			   _intrmap[i].pin,
			   _intrmap[i].polarity,
			   _intrmap[i].trigtype);
		/* Initialise per-cpu Interrupt software masks */
		if (_intrmap[i].ipiflag)
			set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
	}

	vpe_local_setup(numvpes);

	for (i = _irqbase; i < (_irqbase + numintrs); i++)
		set_irq_chip(i, &gic_irq_controller);
}
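
/*
 * Map the GIC register space, read the number of implemented interrupts
 * and VPEs from the shared CONFIG register, then hand every interrupt in
 * [irqbase, irqbase + numintrs) to gic_irq_controller via gic_basic_init().
 */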
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     struct gic_intr_map *intr_map, unsigned int intr_map_size,
		     unsigned int irqbase)
{
	unsigned int gicconfig;

	_gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
						    gic_addrspace_size);
	_irqbase = irqbase;
	_intrmap = intr_map;
	_mapsize = intr_map_size;

	GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
	numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	numintrs = ((numintrs + 1) * 8);	/* field encodes (n / 8) - 1 */

	numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;

	pr_debug("%s called\n", __func__);

	gic_basic_init();
}
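
/*
 * Illustrative usage (a sketch with hypothetical board values, not part
 * of this file): a platform's interrupt setup builds a gic_intr_map
 * table describing how each source is routed, then calls gic_init()
 * along the lines of:
 *
 *	gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, board_intr_map,
 *		 ARRAY_SIZE(board_intr_map), MIPS_GIC_IRQ_BASE);
 */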