/*
 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>

#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/prom.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/setup.h>

#include "kernel.h"
#include "prom.h"
#include "irq.h"
struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */
struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */

int leondebug_irq_disable;
int leon_debug_irqout;
static volatile u32 dummy_master_l10_counter;
unsigned long amba_system_id;
static DEFINE_SPINLOCK(leon_irq_lock);

static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
static unsigned long leon3_gptimer_ackmask; /* For clearing pending bit */
unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned int sparc_leon_eirq;

#define LEON_IMASK(cpu)	(&leon3_irqctrl_regs->mask[cpu])
#define LEON_IACK	(&leon3_irqctrl_regs->iclear)
#define LEON_DO_ACK_HW	1
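
/*
 * Note on LEON_DO_ACK_HW: the per-IRQ chip_data word carries the IRQ mask
 * (LEON_HARD_INT(irq), i.e. 1 << irq, and irq 0 is rejected), so bit 0 is
 * free to flag "ACK the IRQ controller from the EOI handler". A level IRQ
 * registered with do_ack=1 stores (mask | LEON_DO_ACK_HW) and leon_eoi_irq()
 * then writes (mask & ~LEON_DO_ACK_HW) to the iclear register.
 */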
/* Return the last ACKed IRQ by the Extended IRQ controller. It has already
 * been (automatically) ACKed when the CPU takes the trap.
 */
static inline unsigned int leon_eirq_get(int cpu)
{
	return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
}
/* Handle one or multiple IRQs from the extended interrupt controller */
static void leon_handle_ext_irq(struct irq_desc *desc)
{
	unsigned int eirq;
	struct irq_bucket *p;
	int cpu = sparc_leon3_cpuid();

	eirq = leon_eirq_get(cpu);
	p = irq_map[eirq];
	if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
		generic_handle_irq(p->irq);
}
/* The extended IRQ controller has been found, this function registers it */
static void leon_eirq_setup(unsigned int eirq)
{
	unsigned long mask, oldmask;
	unsigned int veirq;

	if (eirq < 1 || eirq > 0xf) {
		printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq);
		return;
	}

	veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0);

	/*
	 * Unmask the Extended IRQ, the IRQs routed through the Ext-IRQ
	 * controller have a mask-bit of their own, so this is safe.
	 */
	irq_link(veirq);
	mask = 1 << eirq;
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask));
	sparc_leon_eirq = eirq;
}
unsigned long leon_get_irqmask(unsigned int irq)
{
	unsigned long mask;

	if (!irq || ((irq > 0xf) && !sparc_leon_eirq)
	    || ((irq > 0x1f) && sparc_leon_eirq)) {
		printk(KERN_ERR
		       "leon_get_irqmask: false irq number: %d\n", irq);
		mask = 0;
	} else {
		mask = LEON_HARD_INT(irq);
	}
	return mask;
}
#ifdef CONFIG_SMP
static int irq_choose_cpu(const struct cpumask *affinity)
{
	cpumask_t mask;

	cpumask_and(&mask, cpu_online_mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
		return boot_cpu_id;
	else
		return cpumask_first(&mask);
}
#else
#define irq_choose_cpu(affinity) boot_cpu_id
#endif
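
/*
 * The IRQMP mask register is per CPU, so each IRQ is effectively routed to
 * exactly one CPU: leon_set_affinity() below masks the IRQ on the old CPU
 * and unmasks it on the CPU selected by irq_choose_cpu().
 */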
static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
			     bool force)
{
	unsigned long mask, oldmask, flags;
	int oldcpu, newcpu;

	mask = (unsigned long)data->chip_data;
	oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
	newcpu = irq_choose_cpu(dest);

	if (oldcpu == newcpu)
		goto out;

	/* mask on old CPU first before enabling on the selected CPU */
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask));
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
out:
	return IRQ_SET_MASK_OK;
}
static void leon_unmask_irq(struct irq_data *data)
{
	unsigned long mask, oldmask, flags;
	int cpu;

	mask = (unsigned long)data->chip_data;
	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}
static void leon_mask_irq(struct irq_data *data)
{
	unsigned long mask, oldmask, flags;
	int cpu;

	mask = (unsigned long)data->chip_data;
	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}
static unsigned int leon_startup_irq(struct irq_data *data)
{
	irq_link(data->irq);
	leon_unmask_irq(data);
	return 0;
}

static void leon_shutdown_irq(struct irq_data *data)
{
	leon_mask_irq(data);
	irq_unlink(data->irq);
}
/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */
static void leon_eoi_irq(struct irq_data *data)
{
	unsigned long mask = (unsigned long)data->chip_data;

	if (mask & LEON_DO_ACK_HW)
		LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW);
}
static struct irq_chip leon_irq = {
	.name			= "leon",
	.irq_startup		= leon_startup_irq,
	.irq_shutdown		= leon_shutdown_irq,
	.irq_mask		= leon_mask_irq,
	.irq_unmask		= leon_unmask_irq,
	.irq_eoi		= leon_eoi_irq,
	.irq_set_affinity	= leon_set_affinity,
};
/*
 * Build a LEON IRQ for the edge triggered LEON IRQ controller:
 *  Edge (normal) IRQ           - handle_simple_irq, ack=DON'T-CARE, never ack
 *  Level IRQ (PCI|Level-GPIO)  - handle_fasteoi_irq, ack=1, ack after ISR
 *  Per-CPU Edge                - handle_percpu_irq, ack=0
 */
unsigned int leon_build_device_irq(unsigned int real_irq,
				   irq_flow_handler_t flow_handler,
				   const char *name, int do_ack)
{
	unsigned int irq;
	unsigned long mask;
	struct irq_desc *desc;

	irq = 0;
	mask = leon_get_irqmask(real_irq);
	if (mask == 0)
		goto out;

	irq = irq_alloc(real_irq, real_irq);
	if (irq == 0)
		goto out;

	if (do_ack)
		mask |= LEON_DO_ACK_HW;

	desc = irq_to_desc(irq);
	if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) {
		irq_set_chip_and_handler_name(irq, &leon_irq,
					      flow_handler, name);
		irq_set_chip_data(irq, (void *)mask);
	}

out:
	return irq;
}
static unsigned int _leon_build_device_irq(struct platform_device *op,
					   unsigned int real_irq)
{
	return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
}
void leon_update_virq_handling(unsigned int virq,
			       irq_flow_handler_t flow_handler,
			       const char *name, int do_ack)
{
	unsigned long mask = (unsigned long)irq_get_chip_data(virq);

	mask &= ~LEON_DO_ACK_HW;
	if (do_ack)
		mask |= LEON_DO_ACK_HW;

	irq_set_chip_and_handler_name(virq, &leon_irq,
				      flow_handler, name);
	irq_set_chip_data(virq, (void *)mask);
}
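
/*
 * The GPTIMER counts down from rld to 0 and reloads on underflow, so
 * rld - val is the number of timer cycles elapsed since the last tick.
 * If the underflow is still pending the tick has not been handled yet,
 * so one full period is added on top of a freshly re-read counter value.
 */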
static u32 leon_cycles_offset(void)
{
	u32 rld, val, ctrl, off;

	rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
	val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
	if (LEON3_GPTIMER_CTRL_ISPENDING(ctrl)) {
		val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
		off = 2 * rld - val;
	} else
		off = rld - val;

	return off + 1;
}
#ifdef CONFIG_SMP

/* smp clockevent irq */
static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
{
	struct clock_event_device *ce;
	int cpu = smp_processor_id();

	leon_clear_profile_irq(cpu);

	if (cpu == boot_cpu_id)
		timer_interrupt(irq, NULL);

	ce = &per_cpu(sparc32_clockevent, cpu);

	irq_enter();
	if (ce->event_handler)
		ce->event_handler(ce);
	irq_exit();

	return IRQ_HANDLED;
}

#endif /* CONFIG_SMP */
void __init leon_init_timers(void)
{
	int irq, eirq;
	struct device_node *rootnp, *np, *nnp;
	struct property *pp;
	int len;
	int icsel;
	int ampopts;
	int err;
	u32 config;
	u32 ctrl;

	sparc_config.get_cycles_offset = leon_cycles_offset;
	sparc_config.cs_period = 1000000 / HZ;
	sparc_config.features |= FEAT_L10_CLOCKSOURCE;

#ifndef CONFIG_SMP
	sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif

	leondebug_irq_disable = 0;
	leon_debug_irqout = 0;
	master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter;
	dummy_master_l10_counter = 0;

	rootnp = of_find_node_by_path("/ambapp0");
	if (!rootnp)
		goto bad;
	/* Find System ID: GRLIB build ID and optional CHIP ID */
	pp = of_find_property(rootnp, "systemid", &len);
	if (pp)
		amba_system_id = *(unsigned long *)pp->value;

	/* Find IRQMP IRQ Controller Registers base address otherwise bail out */
	np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
	if (!np) {
		np = of_find_node_by_name(rootnp, "01_00d");
		if (!np)
			goto bad;
	}
	pp = of_find_property(np, "reg", &len);
	if (!pp)
		goto bad;
	leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value;
	/* Find GPTIMER Timer Registers base address otherwise bail out. */
	nnp = rootnp;

retry:
	np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
	if (!np) {
		np = of_find_node_by_name(nnp, "01_011");
		if (!np)
			goto bad;
	}

	ampopts = 0;
	pp = of_find_property(np, "ampopts", &len);
	if (pp) {
		ampopts = *(int *)pp->value;
		if (ampopts == 0) {
			/* Skip this instance, resource already
			 * allocated by another OS */
			nnp = np;
			goto retry;
		}
	}

	/* Select Timer-Instance on Timer Core. Default is zero */
	leon3_gptimer_idx = ampopts & 0x7;
	pp = of_find_property(np, "reg", &len);
	if (pp)
		leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
				     pp->value;
	pp = of_find_property(np, "interrupts", &len);
	if (pp)
		leon3_gptimer_irq = *(unsigned int *)pp->value;

	if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
		goto bad;
	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
			      ctrl | LEON3_GPTIMER_CTRL_PENDING);
	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);

	if ((ctrl & LEON3_GPTIMER_CTRL_PENDING) != 0)
		leon3_gptimer_ackmask = ~LEON3_GPTIMER_CTRL_PENDING;
	else
		leon3_gptimer_ackmask = ~0;
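
	/*
	 * The probe above writes the pending bit back as '1' and reads it
	 * again: if it is still set, the bit is plain read/write and must be
	 * cleared by writing 0 (the ackmask clears the bit); otherwise the
	 * core implements write-one-to-clear and the control word can be
	 * written back unchanged (ackmask ~0) to ack the tick in
	 * leon_clear_clock_irq().
	 */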
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
			      (((1000000 / HZ) - 1)));
	LEON3_BYPASS_STORE_PA(
			&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
	/*
	 * The IRQ controller may (if implemented) consist of multiple
	 * IRQ controllers, each mapped on a 4Kb boundary.
	 * Each CPU may be routed to different IRQCTRLs, however
	 * we assume that all CPUs (in an SMP system) are routed to the
	 * same IRQ Controller, and for non-SMP only one IRQCTRL is
	 * accessed anyway.
	 * In AMP systems, Linux must run on CPU0 for the time being.
	 */
	icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]);
	icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf;
	leon3_irqctrl_regs += icsel;
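
	/*
	 * Each 32-bit ICSEL register holds eight 4-bit fields, one per CPU,
	 * with CPU0 in the most significant nibble. The field selects which
	 * of the 4Kb-spaced IRQ controller register blocks that CPU uses,
	 * hence the pointer arithmetic on leon3_irqctrl_regs above (the
	 * register map struct spans one 4Kb block).
	 */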
	/* Mask all IRQs on boot-cpu IRQ controller */
	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0);
	/* Probe extended IRQ controller */
	eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus)
		>> 16) & 0xf;
	if (eirq != 0)
		leon_eirq_setup(eirq);
#ifdef CONFIG_SMP
	{
		unsigned long flags;

		/*
		 * In SMP, sun4m adds an IPI handler to the IRQ trap handler
		 * that LEON must never take; sun4d and LEON overwrite the
		 * branch with a NOP.
		 */
		local_irq_save(flags);
		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
		local_ops->cache_all();
		local_irq_restore(flags);
	}
#endif
	config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
	if (config & (1 << LEON3_GPTIMER_SEPIRQ))
		leon3_gptimer_irq += leon3_gptimer_idx;
	else if ((config & LEON3_GPTIMER_TIMERS) > 1)
		pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");
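
	/*
	 * With separate interrupts (SEPIRQ set in the config register) each
	 * sub-timer has its own IRQ line, so the IRQ of the selected timer
	 * instance is the base IRQ plus the timer index; otherwise all
	 * sub-timers share one line and only this instance can be used safely.
	 */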
#ifdef CONFIG_SMP
	/* Install per-cpu IRQ handler for broadcasted ticker */
	irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
				    "per-cpu", 0);
	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
			  IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
#else
	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
#endif
	if (err) {
		pr_err("Unable to attach timer IRQ%d\n", irq);
		prom_halt();
	}
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
			      LEON3_GPTIMER_EN |
			      LEON3_GPTIMER_RL |
			      LEON3_GPTIMER_LD |
			      LEON3_GPTIMER_IRQEN);
	return;
bad:
	printk(KERN_ERR "No Timer/irqctrl found\n");
	BUG();
	return;
}
static void leon_clear_clock_irq(void)
{
	u32 ctrl;

	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
			      ctrl & leon3_gptimer_ackmask);
}
static void leon_load_profile_irq(int cpu, unsigned int limit)
{
}
void __init leon_trans_init(struct device_node *dp)
{
	if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
		struct property *p;

		p = of_find_property(dp, "mid", (void *)0);
		if (p) {
			int mid;

			dp->name = prom_early_alloc(5 + 1);
			memcpy(&mid, p->value, p->length);
			sprintf((char *)dp->name, "cpu%.2d", mid);
		}
	}
}
#ifdef CONFIG_SMP
void leon_clear_profile_irq(int cpu)
{
}

void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
{
	unsigned long mask, flags, *addr;

	mask = leon_get_irqmask(irq_nr);

	spin_lock_irqsave(&leon_irq_lock, flags);

	addr = (unsigned long *)LEON_IMASK(cpu);
	LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));

	spin_unlock_irqrestore(&leon_irq_lock, flags);
}
#endif
void __init leon_init_IRQ(void)
{
	sparc_config.init_timers      = leon_init_timers;
	sparc_config.build_device_irq = _leon_build_device_irq;
	sparc_config.clock_rate       = 1000000;
	sparc_config.clear_clock_irq  = leon_clear_clock_irq;
	sparc_config.load_profile_irq = leon_load_profile_irq;
}