/*
 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>

#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/setup.h>

#include "kernel.h"
#include "prom.h"
#include "irq.h"

struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */
struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */

int leondebug_irq_disable;
int leon_debug_irqout;
static int dummy_master_l10_counter;
unsigned long amba_system_id;
static DEFINE_SPINLOCK(leon_irq_lock);

unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
unsigned int sparc_leon_eirq;

#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
#define LEON_IACK	(&leon3_irqctrl_regs->iclear)
#define LEON_DO_ACK_HW	1

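/*
 * Note on LEON_DO_ACK_HW: the per-IRQ mask stored in chip_data is the
 * single-bit hardware mask returned by leon_get_irqmask() for lines >= 1,
 * so bit 0 is never used by the mask itself. That free bit is borrowed to
 * remember whether leon_eoi_irq() should ACK the interrupt controller
 * (assuming LEON_HARD_INT() expands to "1 << irq", as in asm/leon.h).
 */
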
/* Return the last ACKed IRQ by the Extended IRQ controller. It has already
 * been (automatically) ACKed when the CPU takes the trap.
 */
static inline unsigned int leon_eirq_get(int cpu)
{
	return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
}

/* Handle one or multiple IRQs from the extended interrupt controller */
static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int eirq;
	struct irq_bucket *p;
	int cpu = sparc_leon3_cpuid();

	eirq = leon_eirq_get(cpu);
	p = irq_map[eirq];
	if ((eirq & 0x10) && p && p->irq)	/* bit4 tells if IRQ happened */
		generic_handle_irq(p->irq);
}

/* The extended IRQ controller has been found, this function registers it */
void leon_eirq_setup(unsigned int eirq)
{
	unsigned long mask, oldmask;
	unsigned int veirq;

	if (eirq < 1 || eirq > 0xf) {
		printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq);
		return;
	}

	veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0);

	/*
	 * Unmask the Extended IRQ, the IRQs routed through the Ext-IRQ
	 * controller have a mask-bit of their own, so this is safe.
	 */
	irq_link(veirq);
	mask = LEON_HARD_INT(eirq);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask));

	sparc_leon_eirq = eirq;
}

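/*
 * Translate an interrupt line number into the single-bit mask used by the
 * IRQMP mask/force/clear registers. Lines 1-15 are always valid; lines
 * 16-31 are valid only once an extended IRQ controller has been registered
 * via leon_eirq_setup(). Invalid numbers yield a zero mask.
 */
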
unsigned long leon_get_irqmask(unsigned int irq)
{
	unsigned long mask;

	if (!irq || ((irq > 0xf) && !sparc_leon_eirq)
	    || ((irq > 0x1f) && sparc_leon_eirq)) {
		printk(KERN_ERR
		       "leon_get_irqmask: false irq number: %d\n", irq);
		mask = 0;
	} else {
		mask = LEON_HARD_INT(irq);
	}
	return mask;
}

#ifdef CONFIG_SMP
static int irq_choose_cpu(const struct cpumask *affinity)
{
	cpumask_t mask;

	cpumask_and(&mask, cpu_online_mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
		return boot_cpu_id;
	else
		return cpumask_first(&mask);
}
#else
#define irq_choose_cpu(affinity) boot_cpu_id
#endif

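/*
 * The irq_chip callbacks below operate on the per-CPU mask register of
 * whichever CPU the interrupt is currently routed to (as picked by
 * irq_choose_cpu() from the affinity mask); leon_set_affinity() moves the
 * mask bit from the old CPU's register to the new one under leon_irq_lock.
 */
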
static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
			     bool force)
{
	unsigned long mask, oldmask, flags;
	int oldcpu, newcpu;

	mask = (unsigned long)data->chip_data;
	oldcpu = irq_choose_cpu(data->affinity);
	newcpu = irq_choose_cpu(dest);

	if (oldcpu == newcpu)
		goto out;

	/* Mask the IRQ on the old CPU first, then unmask it on the new CPU */
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask));
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);

out:
	return IRQ_SET_MASK_OK;
}

static void leon_unmask_irq(struct irq_data *data)
{
	unsigned long mask, oldmask, flags;
	int cpu;

	mask = (unsigned long)data->chip_data;
	cpu = irq_choose_cpu(data->affinity);
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}

static void leon_mask_irq(struct irq_data *data)
{
	unsigned long mask, oldmask, flags;
	int cpu;

	mask = (unsigned long)data->chip_data;
	cpu = irq_choose_cpu(data->affinity);
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}

static unsigned int leon_startup_irq(struct irq_data *data)
{
	irq_link(data->irq);
	leon_unmask_irq(data);
	return 0;
}

static void leon_shutdown_irq(struct irq_data *data)
{
	leon_mask_irq(data);
	irq_unlink(data->irq);
}

/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */
static void leon_eoi_irq(struct irq_data *data)
{
	unsigned long mask = (unsigned long)data->chip_data;

	if (mask & LEON_DO_ACK_HW)
		LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW);
}

static struct irq_chip leon_irq = {
	.name			= "leon",
	.irq_startup		= leon_startup_irq,
	.irq_shutdown		= leon_shutdown_irq,
	.irq_mask		= leon_mask_irq,
	.irq_unmask		= leon_unmask_irq,
	.irq_eoi		= leon_eoi_irq,
	.irq_set_affinity	= leon_set_affinity,
};

/*
 * Build a LEON IRQ for the edge triggered LEON IRQ controller:
 *  Edge (normal) IRQ          - handle_simple_irq, ack=DON'T-CARE, never ack
 *  Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR
 *  Per-CPU Edge               - handle_percpu_irq, ack=0
 */
unsigned int leon_build_device_irq(unsigned int real_irq,
				   irq_flow_handler_t flow_handler,
				   const char *name, int do_ack)
{
	unsigned int irq;
	unsigned long mask;
	struct irq_desc *desc;

	irq = 0;
	mask = leon_get_irqmask(real_irq);
	if (mask == 0)
		goto out;

	irq = irq_alloc(real_irq, real_irq);
	if (irq == 0)
		goto out;

	if (do_ack)
		mask |= LEON_DO_ACK_HW;

	desc = irq_to_desc(irq);
	if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) {
		irq_set_chip_and_handler_name(irq, &leon_irq,
					      flow_handler, name);
		irq_set_chip_data(irq, (void *)mask);
	}

out:
	return irq;
}

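/*
 * Hypothetical usage sketch (not from this file): a level-triggered device
 * interrupt on line 5 that must be ACKed in the controller after the ISR
 * could be mapped and requested like this; the line number, handler and
 * names are illustrative only:
 *
 *	virq = leon_build_device_irq(5, handle_fasteoi_irq, "level", 1);
 *	err = request_irq(virq, my_device_isr, 0, "mydev", dev);
 */
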
static unsigned int _leon_build_device_irq(struct platform_device *op,
					   unsigned int real_irq)
{
	return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
}

void leon_update_virq_handling(unsigned int virq,
			       irq_flow_handler_t flow_handler,
			       const char *name, int do_ack)
{
	unsigned long mask = (unsigned long)irq_get_chip_data(virq);

	mask &= ~LEON_DO_ACK_HW;
	if (do_ack)
		mask |= LEON_DO_ACK_HW;

	irq_set_chip_and_handler_name(virq, &leon_irq,
				      flow_handler, name);
	irq_set_chip_data(virq, (void *)mask);
}

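/*
 * Clocksource helper: the GPTIMER sub-timer counts down from its reload
 * value, so "reload - current value" is the number of timer cycles elapsed
 * since the last tick. sparc_config.get_cycles_offset uses this to add
 * sub-tick resolution to the clocksource (see leon_init_timers below).
 */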
static u32 leon_cycles_offset(void)
{
	u32 rld, val, off;

	rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
	val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
	off = rld - val;

	return off;
}

#ifdef CONFIG_SMP

/* smp clockevent irq */
irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
{
	struct clock_event_device *ce;
	int cpu = smp_processor_id();

	leon_clear_profile_irq(cpu);

	if (cpu == boot_cpu_id)
		timer_interrupt(irq, NULL);

	ce = &per_cpu(sparc32_clockevent, cpu);

	irq_enter();
	if (ce->event_handler)
		ce->event_handler(ce);
	irq_exit();

	return IRQ_HANDLED;
}

#endif /* CONFIG_SMP */

void __init leon_init_timers(void)
{
	int irq, eirq;
	struct device_node *rootnp, *np, *nnp;
	struct property *pp;
	int len;
	int icsel;
	int ampopts;
	int err;
	u32 config;

	sparc_config.get_cycles_offset = leon_cycles_offset;
	sparc_config.cs_period = 1000000 / HZ;
	sparc_config.features |= FEAT_L10_CLOCKSOURCE;

#ifndef CONFIG_SMP
	sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif

	leondebug_irq_disable = 0;
	leon_debug_irqout = 0;
	master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
	dummy_master_l10_counter = 0;

	rootnp = of_find_node_by_path("/ambapp0");
	if (!rootnp)
		goto bad;

	/* Find System ID: GRLIB build ID and optional CHIP ID */
	pp = of_find_property(rootnp, "systemid", &len);
	if (pp)
		amba_system_id = *(unsigned long *)pp->value;

	/* Find IRQMP IRQ Controller Registers base adr otherwise bail out */
	np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
	if (!np) {
		np = of_find_node_by_name(rootnp, "01_00d");
		if (!np)
			goto bad;
	}
	pp = of_find_property(np, "reg", &len);
	if (!pp)
		goto bad;
	leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value;

	/* Find GPTIMER Timer Registers base address otherwise bail out. */
	nnp = rootnp;

retry:
	np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
	if (!np) {
		np = of_find_node_by_name(nnp, "01_011");
		if (!np)
			goto bad;
	}

	ampopts = 0;
	pp = of_find_property(np, "ampopts", &len);
	if (pp) {
		ampopts = *(int *)pp->value;
		if (ampopts == 0) {
			/* Skip this instance, resource already
			 * allocated by other OS */
			nnp = np;
			goto retry;
		}
	}

	/* Select Timer-Instance on Timer Core. Default is zero */
	leon3_gptimer_idx = ampopts & 0x7;

	pp = of_find_property(np, "reg", &len);
	if (pp)
		leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
					pp->value;
	pp = of_find_property(np, "interrupts", &len);
	if (pp)
		leon3_gptimer_irq = *(unsigned int *)pp->value;

	if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
		goto bad;

	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
			      (((1000000 / HZ) - 1)));
	LEON3_BYPASS_STORE_PA(
		&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);

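	/*
	 * Assuming the GPTIMER prescaler has been programmed (by firmware or
	 * boot loader) for a 1 MHz tick, matching sparc_config.clock_rate
	 * below, a reload value of 1000000/HZ - 1 gives one underflow, and
	 * thus one timer interrupt, per jiffy.
	 */
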
	/*
	 * The IRQ controller may (if implemented) consist of multiple
	 * IRQ controllers, each mapped on a 4Kb boundary.
	 * Each CPU may be routed to different IRQCTRLs, however
	 * we assume that all CPUs (in an SMP system) are routed to the
	 * same IRQ controller, and for non-SMP only one IRQCTRL is
	 * accessed anyway.
	 * In AMP systems, Linux must run on CPU0 for the time being.
	 */
	icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]);
	icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf;
	leon3_irqctrl_regs += icsel;

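	/*
	 * Each ICSEL register packs eight 4-bit fields, one per CPU, naming
	 * the interrupt controller instance that CPU is wired to; the shift
	 * above extracts this CPU's nibble, and the pointer is then advanced
	 * to that controller instance (the instances are assumed to be laid
	 * out back-to-back, one leon3_irqctrl_regs_map apart).
	 */
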
	/* Mask all IRQs on boot-cpu IRQ controller */
	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0);

	/* Probe extended IRQ controller */
	eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus)
		>> 16) & 0xf;
	if (eirq != 0)
		leon_eirq_setup(eirq);

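	/*
	 * A non-zero EIRQ field (bits 19:16 of the MP status register read
	 * above) means the IRQMP implements the extended-interrupt
	 * extension; leon_eirq_setup() then chains lines 16-31 through that
	 * line. This interpretation follows the GRLIB IRQMP register layout.
	 */
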
#ifdef CONFIG_SMP
	{
		unsigned long flags;

		/*
		 * In SMP, sun4m adds an IPI handler to the IRQ trap handler
		 * that LEON must never take; sun4d and LEON overwrite the
		 * branch with a NOP.
		 */
		local_irq_save(flags);
		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
		local_ops->cache_all();
		local_irq_restore(flags);
	}
#endif

	config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
	if (config & (1 << LEON3_GPTIMER_SEPIRQ))
		leon3_gptimer_irq += leon3_gptimer_idx;
	else if ((config & LEON3_GPTIMER_TIMERS) > 1)
		pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");

#ifdef CONFIG_SMP
	/* Install per-cpu IRQ handler for broadcasted ticker */
	irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
				    "per-cpu", 0);
	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
			  IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
#else
	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
#endif
	if (err) {
		pr_err("Unable to attach timer IRQ%d\n", irq);
		prom_halt();
	}

	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
			      LEON3_GPTIMER_EN |
			      LEON3_GPTIMER_RL |
			      LEON3_GPTIMER_LD |
			      LEON3_GPTIMER_IRQEN);
	return;
bad:
	printk(KERN_ERR "No Timer/irqctrl found\n");
	BUG();
	return;
}

static void leon_clear_clock_irq(void)
{
}

static void leon_load_profile_irq(int cpu, unsigned int limit)
{
}

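/*
 * Early device-tree fixup: CPU nodes coming out of the LEON PROM can have
 * the placeholder name "<NULL>"; rename them to "cpuNN" using the node's
 * "mid" (CPU id) property so the rest of the kernel sees sane node names.
 */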
void __init leon_trans_init(struct device_node *dp)
{
	if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
		struct property *p;

		p = of_find_property(dp, "mid", (void *)0);
		if (p) {
			int mid;

			dp->name = prom_early_alloc(5 + 1);
			memcpy(&mid, p->value, p->length);
			sprintf((char *)dp->name, "cpu%.2d", mid);
		}
	}
}

#ifdef CONFIG_SMP
void leon_clear_profile_irq(int cpu)
{
}

void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
{
	unsigned long mask, flags, *addr;

	mask = leon_get_irqmask(irq_nr);

	spin_lock_irqsave(&leon_irq_lock, flags);
	addr = (unsigned long *)LEON_IMASK(cpu);
	LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}
#endif

void __init leon_init_IRQ(void)
{
	sparc_config.init_timers      = leon_init_timers;
	sparc_config.build_device_irq = _leon_build_device_irq;
	sparc_config.clock_rate       = 1000000;
	sparc_config.clear_clock_irq  = leon_clear_clock_irq;
	sparc_config.load_profile_irq = leon_load_profile_irq;
}
