/*
 * Extracted from linux (fpc-iii.git): arch/powerpc/sysdev/qe_lib/qe_ic.c
 * at commit "of: MSI: Simplify irqdomain lookup"
 * (blob ef36f16f9f6fbc9bdfd0c02e6df29e77fc83817c)
 */
/*
 * arch/powerpc/sysdev/qe_lib/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/errno.h>
20 #include <linux/reboot.h>
21 #include <linux/slab.h>
22 #include <linux/stddef.h>
23 #include <linux/sched.h>
24 #include <linux/signal.h>
25 #include <linux/device.h>
26 #include <linux/spinlock.h>
27 #include <asm/irq.h>
28 #include <asm/io.h>
29 #include <asm/prom.h>
30 #include <asm/qe_ic.h>
32 #include "qe_ic.h"
34 static DEFINE_RAW_SPINLOCK(qe_ic_lock);
36 static struct qe_ic_info qe_ic_info[] = {
37 [1] = {
38 .mask = 0x00008000,
39 .mask_reg = QEIC_CIMR,
40 .pri_code = 0,
41 .pri_reg = QEIC_CIPWCC,
43 [2] = {
44 .mask = 0x00004000,
45 .mask_reg = QEIC_CIMR,
46 .pri_code = 1,
47 .pri_reg = QEIC_CIPWCC,
49 [3] = {
50 .mask = 0x00002000,
51 .mask_reg = QEIC_CIMR,
52 .pri_code = 2,
53 .pri_reg = QEIC_CIPWCC,
55 [10] = {
56 .mask = 0x00000040,
57 .mask_reg = QEIC_CIMR,
58 .pri_code = 1,
59 .pri_reg = QEIC_CIPZCC,
61 [11] = {
62 .mask = 0x00000020,
63 .mask_reg = QEIC_CIMR,
64 .pri_code = 2,
65 .pri_reg = QEIC_CIPZCC,
67 [12] = {
68 .mask = 0x00000010,
69 .mask_reg = QEIC_CIMR,
70 .pri_code = 3,
71 .pri_reg = QEIC_CIPZCC,
73 [13] = {
74 .mask = 0x00000008,
75 .mask_reg = QEIC_CIMR,
76 .pri_code = 4,
77 .pri_reg = QEIC_CIPZCC,
79 [14] = {
80 .mask = 0x00000004,
81 .mask_reg = QEIC_CIMR,
82 .pri_code = 5,
83 .pri_reg = QEIC_CIPZCC,
85 [15] = {
86 .mask = 0x00000002,
87 .mask_reg = QEIC_CIMR,
88 .pri_code = 6,
89 .pri_reg = QEIC_CIPZCC,
91 [20] = {
92 .mask = 0x10000000,
93 .mask_reg = QEIC_CRIMR,
94 .pri_code = 3,
95 .pri_reg = QEIC_CIPRTA,
97 [25] = {
98 .mask = 0x00800000,
99 .mask_reg = QEIC_CRIMR,
100 .pri_code = 0,
101 .pri_reg = QEIC_CIPRTB,
103 [26] = {
104 .mask = 0x00400000,
105 .mask_reg = QEIC_CRIMR,
106 .pri_code = 1,
107 .pri_reg = QEIC_CIPRTB,
109 [27] = {
110 .mask = 0x00200000,
111 .mask_reg = QEIC_CRIMR,
112 .pri_code = 2,
113 .pri_reg = QEIC_CIPRTB,
115 [28] = {
116 .mask = 0x00100000,
117 .mask_reg = QEIC_CRIMR,
118 .pri_code = 3,
119 .pri_reg = QEIC_CIPRTB,
121 [32] = {
122 .mask = 0x80000000,
123 .mask_reg = QEIC_CIMR,
124 .pri_code = 0,
125 .pri_reg = QEIC_CIPXCC,
127 [33] = {
128 .mask = 0x40000000,
129 .mask_reg = QEIC_CIMR,
130 .pri_code = 1,
131 .pri_reg = QEIC_CIPXCC,
133 [34] = {
134 .mask = 0x20000000,
135 .mask_reg = QEIC_CIMR,
136 .pri_code = 2,
137 .pri_reg = QEIC_CIPXCC,
139 [35] = {
140 .mask = 0x10000000,
141 .mask_reg = QEIC_CIMR,
142 .pri_code = 3,
143 .pri_reg = QEIC_CIPXCC,
145 [36] = {
146 .mask = 0x08000000,
147 .mask_reg = QEIC_CIMR,
148 .pri_code = 4,
149 .pri_reg = QEIC_CIPXCC,
151 [40] = {
152 .mask = 0x00800000,
153 .mask_reg = QEIC_CIMR,
154 .pri_code = 0,
155 .pri_reg = QEIC_CIPYCC,
157 [41] = {
158 .mask = 0x00400000,
159 .mask_reg = QEIC_CIMR,
160 .pri_code = 1,
161 .pri_reg = QEIC_CIPYCC,
163 [42] = {
164 .mask = 0x00200000,
165 .mask_reg = QEIC_CIMR,
166 .pri_code = 2,
167 .pri_reg = QEIC_CIPYCC,
169 [43] = {
170 .mask = 0x00100000,
171 .mask_reg = QEIC_CIMR,
172 .pri_code = 3,
173 .pri_reg = QEIC_CIPYCC,
177 static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
179 return in_be32(base + (reg >> 2));
182 static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
183 u32 value)
185 out_be32(base + (reg >> 2), value);
/* Recover the owning qe_ic from a virtual irq number (chip_data). */
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
	return irq_get_chip_data(virq);
}
/* Recover the owning qe_ic from irq_data (chip_data variant). */
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}
198 static void qe_ic_unmask_irq(struct irq_data *d)
200 struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
201 unsigned int src = irqd_to_hwirq(d);
202 unsigned long flags;
203 u32 temp;
205 raw_spin_lock_irqsave(&qe_ic_lock, flags);
207 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
208 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
209 temp | qe_ic_info[src].mask);
211 raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
214 static void qe_ic_mask_irq(struct irq_data *d)
216 struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
217 unsigned int src = irqd_to_hwirq(d);
218 unsigned long flags;
219 u32 temp;
221 raw_spin_lock_irqsave(&qe_ic_lock, flags);
223 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
224 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
225 temp & ~qe_ic_info[src].mask);
227 /* Flush the above write before enabling interrupts; otherwise,
228 * spurious interrupts will sometimes happen. To be 100% sure
229 * that the write has reached the device before interrupts are
230 * enabled, the mask register would have to be read back; however,
231 * this is not required for correctness, only to avoid wasting
232 * time on a large number of spurious interrupts. In testing,
233 * a sync reduced the observed spurious interrupts to zero.
235 mb();
237 raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
240 static struct irq_chip qe_ic_irq_chip = {
241 .name = "QEIC",
242 .irq_unmask = qe_ic_unmask_irq,
243 .irq_mask = qe_ic_mask_irq,
244 .irq_mask_ack = qe_ic_mask_irq,
247 static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
248 enum irq_domain_bus_token bus_token)
250 /* Exact match, unless qe_ic node is NULL */
251 struct device_node *of_node = irq_domain_get_of_node(h);
252 return of_node == NULL || of_node == node;
255 static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
256 irq_hw_number_t hw)
258 struct qe_ic *qe_ic = h->host_data;
259 struct irq_chip *chip;
261 if (qe_ic_info[hw].mask == 0) {
262 printk(KERN_ERR "Can't map reserved IRQ\n");
263 return -EINVAL;
265 /* Default chip */
266 chip = &qe_ic->hc_irq;
268 irq_set_chip_data(virq, qe_ic);
269 irq_set_status_flags(virq, IRQ_LEVEL);
271 irq_set_chip_and_handler(virq, chip, handle_level_irq);
273 return 0;
276 static const struct irq_domain_ops qe_ic_host_ops = {
277 .match = qe_ic_host_match,
278 .map = qe_ic_host_map,
279 .xlate = irq_domain_xlate_onetwocell,
282 /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
283 unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
285 int irq;
287 BUG_ON(qe_ic == NULL);
289 /* get the interrupt source vector. */
290 irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
292 if (irq == 0)
293 return NO_IRQ;
295 return irq_linear_revmap(qe_ic->irqhost, irq);
298 /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
299 unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
301 int irq;
303 BUG_ON(qe_ic == NULL);
305 /* get the interrupt source vector. */
306 irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
308 if (irq == 0)
309 return NO_IRQ;
311 return irq_linear_revmap(qe_ic->irqhost, irq);
314 void __init qe_ic_init(struct device_node *node, unsigned int flags,
315 void (*low_handler)(struct irq_desc *desc),
316 void (*high_handler)(struct irq_desc *desc))
318 struct qe_ic *qe_ic;
319 struct resource res;
320 u32 temp = 0, ret, high_active = 0;
322 ret = of_address_to_resource(node, 0, &res);
323 if (ret)
324 return;
326 qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
327 if (qe_ic == NULL)
328 return;
330 qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
331 &qe_ic_host_ops, qe_ic);
332 if (qe_ic->irqhost == NULL) {
333 kfree(qe_ic);
334 return;
337 qe_ic->regs = ioremap(res.start, resource_size(&res));
339 qe_ic->hc_irq = qe_ic_irq_chip;
341 qe_ic->virq_high = irq_of_parse_and_map(node, 0);
342 qe_ic->virq_low = irq_of_parse_and_map(node, 1);
344 if (qe_ic->virq_low == NO_IRQ) {
345 printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
346 kfree(qe_ic);
347 return;
350 /* default priority scheme is grouped. If spread mode is */
351 /* required, configure cicr accordingly. */
352 if (flags & QE_IC_SPREADMODE_GRP_W)
353 temp |= CICR_GWCC;
354 if (flags & QE_IC_SPREADMODE_GRP_X)
355 temp |= CICR_GXCC;
356 if (flags & QE_IC_SPREADMODE_GRP_Y)
357 temp |= CICR_GYCC;
358 if (flags & QE_IC_SPREADMODE_GRP_Z)
359 temp |= CICR_GZCC;
360 if (flags & QE_IC_SPREADMODE_GRP_RISCA)
361 temp |= CICR_GRTA;
362 if (flags & QE_IC_SPREADMODE_GRP_RISCB)
363 temp |= CICR_GRTB;
365 /* choose destination signal for highest priority interrupt */
366 if (flags & QE_IC_HIGH_SIGNAL) {
367 temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
368 high_active = 1;
371 qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
373 irq_set_handler_data(qe_ic->virq_low, qe_ic);
374 irq_set_chained_handler(qe_ic->virq_low, low_handler);
376 if (qe_ic->virq_high != NO_IRQ &&
377 qe_ic->virq_high != qe_ic->virq_low) {
378 irq_set_handler_data(qe_ic->virq_high, qe_ic);
379 irq_set_chained_handler(qe_ic->virq_high, high_handler);
383 void qe_ic_set_highest_priority(unsigned int virq, int high)
385 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
386 unsigned int src = virq_to_hw(virq);
387 u32 temp = 0;
389 temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
391 temp &= ~CICR_HP_MASK;
392 temp |= src << CICR_HP_SHIFT;
394 temp &= ~CICR_HPIT_MASK;
395 temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
397 qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
400 /* Set Priority level within its group, from 1 to 8 */
401 int qe_ic_set_priority(unsigned int virq, unsigned int priority)
403 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
404 unsigned int src = virq_to_hw(virq);
405 u32 temp;
407 if (priority > 8 || priority == 0)
408 return -EINVAL;
409 if (src > 127)
410 return -EINVAL;
411 if (qe_ic_info[src].pri_reg == 0)
412 return -EINVAL;
414 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
416 if (priority < 4) {
417 temp &= ~(0x7 << (32 - priority * 3));
418 temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
419 } else {
420 temp &= ~(0x7 << (24 - priority * 3));
421 temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
424 qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
426 return 0;
429 /* Set a QE priority to use high irq, only priority 1~2 can use high irq */
430 int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
432 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
433 unsigned int src = virq_to_hw(virq);
434 u32 temp, control_reg = QEIC_CICNR, shift = 0;
436 if (priority > 2 || priority == 0)
437 return -EINVAL;
439 switch (qe_ic_info[src].pri_reg) {
440 case QEIC_CIPZCC:
441 shift = CICNR_ZCC1T_SHIFT;
442 break;
443 case QEIC_CIPWCC:
444 shift = CICNR_WCC1T_SHIFT;
445 break;
446 case QEIC_CIPYCC:
447 shift = CICNR_YCC1T_SHIFT;
448 break;
449 case QEIC_CIPXCC:
450 shift = CICNR_XCC1T_SHIFT;
451 break;
452 case QEIC_CIPRTA:
453 shift = CRICR_RTA1T_SHIFT;
454 control_reg = QEIC_CRICR;
455 break;
456 case QEIC_CIPRTB:
457 shift = CRICR_RTB1T_SHIFT;
458 control_reg = QEIC_CRICR;
459 break;
460 default:
461 return -EINVAL;
464 shift += (2 - priority) * 2;
465 temp = qe_ic_read(qe_ic->regs, control_reg);
466 temp &= ~(SIGNAL_MASK << shift);
467 temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
468 qe_ic_write(qe_ic->regs, control_reg, temp);
470 return 0;
473 static struct bus_type qe_ic_subsys = {
474 .name = "qe_ic",
475 .dev_name = "qe_ic",
478 static struct device device_qe_ic = {
479 .id = 0,
480 .bus = &qe_ic_subsys,
483 static int __init init_qe_ic_sysfs(void)
485 int rc;
487 printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
489 rc = subsys_system_register(&qe_ic_subsys, NULL);
490 if (rc) {
491 printk(KERN_ERR "Failed registering qe_ic sys class\n");
492 return -ENODEV;
494 rc = device_register(&device_qe_ic);
495 if (rc) {
496 printk(KERN_ERR "Failed registering qe_ic sys device\n");
497 return -ENODEV;
499 return 0;
502 subsys_initcall(init_qe_ic_sysfs);