/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/ipic.h>

#include "ipic.h"

static struct ipic * primary_ipic;
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
static DEFINE_RAW_SPINLOCK(ipic_lock);

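/*
 * Per-source register description, indexed by hardware interrupt number.
 * For each source, .mask, .prio and .force are byte offsets of the mask,
 * priority and force registers that carry this source, .bit is its bit
 * position counted from the most significant bit, and .prio_mask selects
 * the 3-bit priority field within the priority register.  Sources with
 * .prio == 0 have no programmable priority, and only sources with an .ack
 * register (the external IRQ pins) support edge triggering.
 */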
static struct ipic_info ipic_info[] = {
	[1] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_C,
		.force	= IPIC_SIFCR_H,
		.bit	= 16,
		.prio_mask = 0,
	},
	[2] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_C,
		.force	= IPIC_SIFCR_H,
		.bit	= 17,
		.prio_mask = 1,
	},
	[3] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_C,
		.force	= IPIC_SIFCR_H,
		.bit	= 18,
		.prio_mask = 2,
	},
	[4] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_C,
		.force	= IPIC_SIFCR_H,
		.bit	= 19,
		.prio_mask = 3,
	},
	[5] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_C,
		.force	= IPIC_SIFCR_H,
		.bit	= 20,
		.prio_mask = 4,
	},
	[6] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_C,
		.force	= IPIC_SIFCR_H,
		.bit	= 21,
		.prio_mask = 5,
	},
	[7] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_C,
		.force	= IPIC_SIFCR_H,
		.bit	= 22,
		.prio_mask = 6,
	},
	[8] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_C,
		.force	= IPIC_SIFCR_H,
		.bit	= 23,
		.prio_mask = 7,
	},
	[9] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_D,
		.force	= IPIC_SIFCR_H,
		.bit	= 24,
		.prio_mask = 0,
	},
	[10] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_D,
		.force	= IPIC_SIFCR_H,
		.bit	= 25,
		.prio_mask = 1,
	},
	[11] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_D,
		.force	= IPIC_SIFCR_H,
		.bit	= 26,
		.prio_mask = 2,
	},
	[12] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_D,
		.force	= IPIC_SIFCR_H,
		.bit	= 27,
		.prio_mask = 3,
	},
	[13] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_D,
		.force	= IPIC_SIFCR_H,
		.bit	= 28,
		.prio_mask = 4,
	},
	[14] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_D,
		.force	= IPIC_SIFCR_H,
		.bit	= 29,
		.prio_mask = 5,
	},
	[15] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_D,
		.force	= IPIC_SIFCR_H,
		.bit	= 30,
		.prio_mask = 6,
	},
	[16] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_D,
		.force	= IPIC_SIFCR_H,
		.bit	= 31,
		.prio_mask = 7,
	},
	[17] = {
		.ack	= IPIC_SEPNR,
		.mask	= IPIC_SEMSR,
		.prio	= IPIC_SMPRR_A,
		.force	= IPIC_SEFCR,
		.bit	= 1,
		.prio_mask = 5,
	},
	[18] = {
		.ack	= IPIC_SEPNR,
		.mask	= IPIC_SEMSR,
		.prio	= IPIC_SMPRR_A,
		.force	= IPIC_SEFCR,
		.bit	= 2,
		.prio_mask = 6,
	},
	[19] = {
		.ack	= IPIC_SEPNR,
		.mask	= IPIC_SEMSR,
		.prio	= IPIC_SMPRR_A,
		.force	= IPIC_SEFCR,
		.bit	= 3,
		.prio_mask = 7,
	},
	[20] = {
		.ack	= IPIC_SEPNR,
		.mask	= IPIC_SEMSR,
		.prio	= IPIC_SMPRR_B,
		.force	= IPIC_SEFCR,
		.bit	= 4,
		.prio_mask = 4,
	},
	[21] = {
		.ack	= IPIC_SEPNR,
		.mask	= IPIC_SEMSR,
		.prio	= IPIC_SMPRR_B,
		.force	= IPIC_SEFCR,
		.bit	= 5,
		.prio_mask = 5,
	},
	[22] = {
		.ack	= IPIC_SEPNR,
		.mask	= IPIC_SEMSR,
		.prio	= IPIC_SMPRR_B,
		.force	= IPIC_SEFCR,
		.bit	= 6,
		.prio_mask = 6,
	},
	[23] = {
		.ack	= IPIC_SEPNR,
		.mask	= IPIC_SEMSR,
		.prio	= IPIC_SMPRR_B,
		.force	= IPIC_SEFCR,
		.bit	= 7,
		.prio_mask = 7,
	},
	[32] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_A,
		.force	= IPIC_SIFCR_H,
		.bit	= 0,
		.prio_mask = 0,
	},
	[33] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_A,
		.force	= IPIC_SIFCR_H,
		.bit	= 1,
		.prio_mask = 1,
	},
	[34] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_A,
		.force	= IPIC_SIFCR_H,
		.bit	= 2,
		.prio_mask = 2,
	},
	[35] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_A,
		.force	= IPIC_SIFCR_H,
		.bit	= 3,
		.prio_mask = 3,
	},
	[36] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_A,
		.force	= IPIC_SIFCR_H,
		.bit	= 4,
		.prio_mask = 4,
	},
	[37] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_A,
		.force	= IPIC_SIFCR_H,
		.bit	= 5,
		.prio_mask = 5,
	},
	[38] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_A,
		.force	= IPIC_SIFCR_H,
		.bit	= 6,
		.prio_mask = 6,
	},
	[39] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_A,
		.force	= IPIC_SIFCR_H,
		.bit	= 7,
		.prio_mask = 7,
	},
	[40] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_B,
		.force	= IPIC_SIFCR_H,
		.bit	= 8,
		.prio_mask = 0,
	},
	[41] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_B,
		.force	= IPIC_SIFCR_H,
		.bit	= 9,
		.prio_mask = 1,
	},
	[42] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_B,
		.force	= IPIC_SIFCR_H,
		.bit	= 10,
		.prio_mask = 2,
	},
	[43] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_B,
		.force	= IPIC_SIFCR_H,
		.bit	= 11,
		.prio_mask = 3,
	},
	[44] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_B,
		.force	= IPIC_SIFCR_H,
		.bit	= 12,
		.prio_mask = 4,
	},
	[45] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_B,
		.force	= IPIC_SIFCR_H,
		.bit	= 13,
		.prio_mask = 5,
	},
	[46] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_B,
		.force	= IPIC_SIFCR_H,
		.bit	= 14,
		.prio_mask = 6,
	},
	[47] = {
		.mask	= IPIC_SIMSR_H,
		.prio	= IPIC_SIPRR_B,
		.force	= IPIC_SIFCR_H,
		.bit	= 15,
		.prio_mask = 7,
	},
	[48] = {
		.mask	= IPIC_SEMSR,
		.prio	= IPIC_SMPRR_A,
		.force	= IPIC_SEFCR,
		.bit	= 0,
		.prio_mask = 4,
	},
	[64] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= IPIC_SMPRR_A,
		.force	= IPIC_SIFCR_L,
		.bit	= 0,
		.prio_mask = 0,
	},
	[65] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= IPIC_SMPRR_A,
		.force	= IPIC_SIFCR_L,
		.bit	= 1,
		.prio_mask = 1,
	},
	[66] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= IPIC_SMPRR_A,
		.force	= IPIC_SIFCR_L,
		.bit	= 2,
		.prio_mask = 2,
	},
	[67] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= IPIC_SMPRR_A,
		.force	= IPIC_SIFCR_L,
		.bit	= 3,
		.prio_mask = 3,
	},
	[68] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= IPIC_SMPRR_B,
		.force	= IPIC_SIFCR_L,
		.bit	= 4,
		.prio_mask = 0,
	},
	[69] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= IPIC_SMPRR_B,
		.force	= IPIC_SIFCR_L,
		.bit	= 5,
		.prio_mask = 1,
	},
	[70] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= IPIC_SMPRR_B,
		.force	= IPIC_SIFCR_L,
		.bit	= 6,
		.prio_mask = 2,
	},
	[71] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= IPIC_SMPRR_B,
		.force	= IPIC_SIFCR_L,
		.bit	= 7,
		.prio_mask = 3,
	},
	[72] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 8,
	},
	[73] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 9,
	},
	[74] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 10,
	},
	[75] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 11,
	},
	[76] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 12,
	},
	[77] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 13,
	},
	[78] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 14,
	},
	[79] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 15,
	},
	[80] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 16,
	},
	[81] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 17,
	},
	[82] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 18,
	},
	[83] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 19,
	},
	[84] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 20,
	},
	[85] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 21,
	},
	[86] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 22,
	},
	[87] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 23,
	},
	[88] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 24,
	},
	[89] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 25,
	},
	[90] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 26,
	},
	[91] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 27,
	},
	[94] = {
		.mask	= IPIC_SIMSR_L,
		.prio	= 0,
		.force	= IPIC_SIFCR_L,
		.bit	= 30,
	},
};

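/*
 * Register accessors: 'reg' is a byte offset into the memory-mapped IPIC
 * register block, converted to a 32-bit word index for the big-endian
 * in_be32()/out_be32() accessors.
 */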
static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
{
	out_be32(base + (reg >> 2), value);
}

static inline struct ipic * ipic_from_irq(unsigned int virq)
{
	return primary_ipic;
}

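/*
 * The mask, force and pending registers number their sources from the most
 * significant bit, hence the (31 - bit) shifts below.  Setting a bit in the
 * mask register enables the source; acking writes a 1 to the pending
 * register to clear a latched external (edge) event.
 */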
static void ipic_unmask_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp |= (1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	/* mb() can't guarantee that masking is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_ack_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq_and_ack(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

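/*
 * The IPIC understands only two senses: active-low level and falling edge.
 * Edge mode is programmable only for the external IRQ pins (the sources
 * that have an ack register); their edge-detect control bits live in SECNR.
 */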
static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned int vold, vnew, edibit;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* ipic supports only low assertion and high-to-low change senses
	 */
	if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
		printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
			flow_type);
		return -EINVAL;
	}
	/* ipic supports only edge mode on external interrupts */
	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
		printk(KERN_ERR "ipic: edge sense not supported on internal "
				"interrupts\n");
		return -EINVAL;
	}

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & IRQ_TYPE_LEVEL_LOW) {
		__irq_set_handler_locked(d->irq, handle_level_irq);
		d->chip = &ipic_level_irq_chip;
	} else {
		__irq_set_handler_locked(d->irq, handle_edge_irq);
		d->chip = &ipic_edge_irq_chip;
	}

	/* only EXT IRQ senses are programmable on ipic
	 * internal IRQ senses are LEVEL_LOW
	 */
	if (src == IPIC_IRQ_EXT0)
		edibit = 15;
	else
		if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
			edibit = (14 - (src - IPIC_IRQ_EXT1));
		else
			return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;

	vold = ipic_read(ipic->regs, IPIC_SECNR);
	if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
		vnew = vold | (1 << edibit);
	} else {
		vnew = vold & ~(1 << edibit);
	}
	if (vold != vnew)
		ipic_write(ipic->regs, IPIC_SECNR, vnew);
	return IRQ_SET_MASK_OK_NOCOPY;
}

/* level interrupts and edge interrupts have different ack operations */
static struct irq_chip ipic_level_irq_chip = {
	.name		= "IPIC",
	.irq_unmask	= ipic_unmask_irq,
	.irq_mask	= ipic_mask_irq,
	.irq_mask_ack	= ipic_mask_irq,
	.irq_set_type	= ipic_set_irq_type,
};

static struct irq_chip ipic_edge_irq_chip = {
	.name		= "IPIC",
	.irq_unmask	= ipic_unmask_irq,
	.irq_mask	= ipic_mask_irq,
	.irq_mask_ack	= ipic_mask_irq_and_ack,
	.irq_ack	= ipic_ack_irq,
	.irq_set_type	= ipic_set_irq_type,
};

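/*
 * irq_host glue: sources are mapped linearly.  ipic_host_map() installs the
 * level chip and handler by default; ipic_set_irq_type() switches a mapped
 * interrupt over to the edge chip when a falling-edge sense is requested.
 * ipic_host_xlate() passes the device-tree sense cell straight through as
 * Linux IRQ_TYPE_* flags, so (illustrative example only) a level-low source
 * would be described roughly as "interrupts = <18 0x8>".
 */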
static int ipic_host_match(struct irq_host *h, struct device_node *node)
{
	/* Exact match, unless ipic node is NULL */
	return h->of_node == NULL || h->of_node == node;
}

static int ipic_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct ipic *ipic = h->host_data;

	irq_set_chip_data(virq, ipic);
	irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static int ipic_host_xlate(struct irq_host *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	/* interrupt sense values coming from the device tree equal either
	 * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change)
	 */
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_flags = intspec[1];
	else
		*out_flags = IRQ_TYPE_NONE;
	return 0;
}

static struct irq_host_ops ipic_host_ops = {
	.match	= ipic_host_match,
	.map	= ipic_host_map,
	.xlate	= ipic_host_xlate,
};

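/*
 * Typical board-setup usage (illustrative sketch only, not part of this
 * file): platform code locates the IPIC node, initializes it, and installs
 * ipic_get_irq() as the platform interrupt fetch hook, roughly:
 *
 *	np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
 *	ipic_init(np, 0);
 *	of_node_put(np);
 *	ipic_set_default_priority();
 *
 * with ppc_md.get_irq pointing at ipic_get_irq().
 */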
struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
{
	struct ipic	*ipic;
	struct resource res;
	u32 temp = 0, ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return NULL;

	ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
	if (ipic == NULL)
		return NULL;

	ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
				       NR_IPIC_INTS,
				       &ipic_host_ops, 0);
	if (ipic->irqhost == NULL) {
		kfree(ipic);
		return NULL;
	}

	ipic->regs = ioremap(res.start, resource_size(&res));

	ipic->irqhost->host_data = ipic;

	/* init hw */
	ipic_write(ipic->regs, IPIC_SICNR, 0x0);

	/* default priority scheme is grouped. If spread mode is required
	 * configure SICFR accordingly */
	if (flags & IPIC_SPREADMODE_GRP_A)
		temp |= SICFR_IPSA;
	if (flags & IPIC_SPREADMODE_GRP_B)
		temp |= SICFR_IPSB;
	if (flags & IPIC_SPREADMODE_GRP_C)
		temp |= SICFR_IPSC;
	if (flags & IPIC_SPREADMODE_GRP_D)
		temp |= SICFR_IPSD;
	if (flags & IPIC_SPREADMODE_MIX_A)
		temp |= SICFR_MPSA;
	if (flags & IPIC_SPREADMODE_MIX_B)
		temp |= SICFR_MPSB;

	ipic_write(ipic->regs, IPIC_SICFR, temp);

	/* handle MCP route */
	temp = 0;
	if (flags & IPIC_DISABLE_MCP_OUT)
		temp = SERCR_MCPR;
	ipic_write(ipic->regs, IPIC_SERCR, temp);

	/* handle routing of IRQ0 to MCP */
	temp = ipic_read(ipic->regs, IPIC_SEMSR);

	if (flags & IPIC_IRQ0_MCP)
		temp |= SEMSR_SIRQ0;
	else
		temp &= ~SEMSR_SIRQ0;

	ipic_write(ipic->regs, IPIC_SEMSR, temp);

	primary_ipic = ipic;
	irq_set_default_host(primary_ipic->irqhost);

	ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
	ipic_write(ipic->regs, IPIC_SIMSR_L, 0);

	printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
			primary_ipic->regs);

	return ipic;
}

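/*
 * SIPRR/SMPRR pack eight 3-bit priority fields per register: positions 0-3
 * occupy the upper half and positions 4-7 the lower half, which is why the
 * two shift expressions below differ.  The source's prio_mask from
 * ipic_info is the value written into the chosen field.
 */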
int ipic_set_priority(unsigned int virq, unsigned int priority)
{
	struct ipic *ipic = ipic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	if (priority > 7)
		return -EINVAL;
	if (src > 127)
		return -EINVAL;
	if (ipic_info[src].prio == 0)
		return -EINVAL;

	temp = ipic_read(ipic->regs, ipic_info[src].prio);

	if (priority < 4) {
		temp &= ~(0x7 << (20 + (3 - priority) * 3));
		temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
	} else {
		temp &= ~(0x7 << (4 + (7 - priority) * 3));
		temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
	}

	ipic_write(ipic->regs, ipic_info[src].prio, temp);

	return 0;
}

void ipic_set_highest_priority(unsigned int virq)
{
	struct ipic *ipic = ipic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	temp = ipic_read(ipic->regs, IPIC_SICFR);

	/* clear and set HPI */
	temp &= 0x7f000000;
	temp |= (src & 0x7f) << 24;

	ipic_write(ipic->regs, IPIC_SICFR, temp);
}

void ipic_set_default_priority(void)
{
	ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
}

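/*
 * MCP helpers: ipic_enable_mcp()/ipic_disable_mcp() adjust the per-source
 * bit in the error mask register (SERMR); as elsewhere, bits are numbered
 * from the most significant bit.
 */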
void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
{
	struct ipic *ipic = primary_ipic;
	u32 temp;

	temp = ipic_read(ipic->regs, IPIC_SERMR);
	temp |= (1 << (31 - mcp_irq));
	ipic_write(ipic->regs, IPIC_SERMR, temp);
}

void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
{
	struct ipic *ipic = primary_ipic;
	u32 temp;

	temp = ipic_read(ipic->regs, IPIC_SERMR);
	temp &= (1 << (31 - mcp_irq));
	ipic_write(ipic->regs, IPIC_SERMR, temp);
}

u32 ipic_get_mcp_status(void)
{
	return ipic_read(primary_ipic->regs, IPIC_SERMR);
}

void ipic_clear_mcp_status(u32 mask)
{
	ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
}

/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int ipic_get_irq(void)
{
	int irq;

	BUG_ON(primary_ipic == NULL);

#define IPIC_SIVCR_VECTOR_MASK	0x7f
	irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;

	if (irq == 0)    /* 0 --> no irq is pending */
		return NO_IRQ;

	return irq_linear_revmap(primary_ipic->irqhost, irq);
}

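/*
 * Power-management support: when CONFIG_SUSPEND is enabled, the syscore
 * hooks below snapshot the IPIC registers on suspend and write the saved
 * values back on resume; for deep sleep all interrupt sources are masked
 * first, since pending interrupts can cause problems on 831x parts.
 */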
#ifdef CONFIG_SUSPEND
static struct {
	u32 sicfr;
	u32 siprr[2];
	u32 simsr[2];
	u32 sicnr;
	u32 smprr[2];
	u32 semsr;
	u32 secnr;
	u32 sermr;
	u32 sercr;
} ipic_saved_state;

static int ipic_suspend(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
	ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
	ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
	ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
	ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
	ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
	ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
	ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
	ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
	ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
	ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
	ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);

	if (fsl_deep_sleep()) {
		/* In deep sleep, make sure there can be no
		 * pending interrupts, as this can cause
		 * problems on 831x.
		 */
		ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
		ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
		ipic_write(ipic->regs, IPIC_SEMSR, 0);
		ipic_write(ipic->regs, IPIC_SERMR, 0);
	}

	return 0;
}

static void ipic_resume(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
	ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
	ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
	ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
	ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
	ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
	ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
	ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
	ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
	ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
	ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
	ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
}
#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif

static struct syscore_ops ipic_syscore_ops = {
	.suspend = ipic_suspend,
	.resume = ipic_resume,
};

static int __init init_ipic_syscore(void)
{
	if (!primary_ipic || !primary_ipic->regs)
		return -ENODEV;

	printk(KERN_DEBUG "Registering ipic system core operations\n");
	register_syscore_ops(&ipic_syscore_ops);

	return 0;
}

subsys_initcall(init_ipic_syscore);