// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/ipic.h>

#include "ipic.h"

static struct ipic *primary_ipic;
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
static DEFINE_RAW_SPINLOCK(ipic_lock);
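
/*
 * Per-source register map.  For each hardware IRQ number this table gives
 * the mask, priority and force registers that control the source, the bit
 * position used within those registers, and the slot of its priority field.
 * External sources additionally have an ack (pending) register.  The
 * mask/unmask/ack handlers below index this table directly by hwirq number.
 */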
static struct ipic_info ipic_info[] = {
        [1] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_C,
                .force = IPIC_SIFCR_H,
                .bit = 16,
                .prio_mask = 0,
        },
        [2] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_C,
                .force = IPIC_SIFCR_H,
                .bit = 17,
                .prio_mask = 1,
        },
        [3] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_C,
                .force = IPIC_SIFCR_H,
                .bit = 18,
                .prio_mask = 2,
        },
        [4] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_C,
                .force = IPIC_SIFCR_H,
                .bit = 19,
                .prio_mask = 3,
        },
        [5] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_C,
                .force = IPIC_SIFCR_H,
                .bit = 20,
                .prio_mask = 4,
        },
        [6] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_C,
                .force = IPIC_SIFCR_H,
                .bit = 21,
                .prio_mask = 5,
        },
        [7] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_C,
                .force = IPIC_SIFCR_H,
                .bit = 22,
                .prio_mask = 6,
        },
        [8] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_C,
                .force = IPIC_SIFCR_H,
                .bit = 23,
                .prio_mask = 7,
        },
        [9] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_D,
                .force = IPIC_SIFCR_H,
                .bit = 24,
                .prio_mask = 0,
        },
        [10] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_D,
                .force = IPIC_SIFCR_H,
                .bit = 25,
                .prio_mask = 1,
        },
        [11] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_D,
                .force = IPIC_SIFCR_H,
                .bit = 26,
                .prio_mask = 2,
        },
        [12] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_D,
                .force = IPIC_SIFCR_H,
                .bit = 27,
                .prio_mask = 3,
        },
        [13] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_D,
                .force = IPIC_SIFCR_H,
                .bit = 28,
                .prio_mask = 4,
        },
        [14] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_D,
                .force = IPIC_SIFCR_H,
                .bit = 29,
                .prio_mask = 5,
        },
        [15] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_D,
                .force = IPIC_SIFCR_H,
                .bit = 30,
                .prio_mask = 6,
        },
        [16] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_D,
                .force = IPIC_SIFCR_H,
                .bit = 31,
                .prio_mask = 7,
        },
        [17] = {
                .ack = IPIC_SEPNR,
                .mask = IPIC_SEMSR,
                .prio = IPIC_SMPRR_A,
                .force = IPIC_SEFCR,
                .bit = 1,
                .prio_mask = 5,
        },
        [18] = {
                .ack = IPIC_SEPNR,
                .mask = IPIC_SEMSR,
                .prio = IPIC_SMPRR_A,
                .force = IPIC_SEFCR,
                .bit = 2,
                .prio_mask = 6,
        },
        [19] = {
                .ack = IPIC_SEPNR,
                .mask = IPIC_SEMSR,
                .prio = IPIC_SMPRR_A,
                .force = IPIC_SEFCR,
                .bit = 3,
                .prio_mask = 7,
        },
        [20] = {
                .ack = IPIC_SEPNR,
                .mask = IPIC_SEMSR,
                .prio = IPIC_SMPRR_B,
                .force = IPIC_SEFCR,
                .bit = 4,
                .prio_mask = 4,
        },
        [21] = {
                .ack = IPIC_SEPNR,
                .mask = IPIC_SEMSR,
                .prio = IPIC_SMPRR_B,
                .force = IPIC_SEFCR,
                .bit = 5,
                .prio_mask = 5,
        },
        [22] = {
                .ack = IPIC_SEPNR,
                .mask = IPIC_SEMSR,
                .prio = IPIC_SMPRR_B,
                .force = IPIC_SEFCR,
                .bit = 6,
                .prio_mask = 6,
        },
        [23] = {
                .ack = IPIC_SEPNR,
                .mask = IPIC_SEMSR,
                .prio = IPIC_SMPRR_B,
                .force = IPIC_SEFCR,
                .bit = 7,
                .prio_mask = 7,
        },
        [32] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_A,
                .force = IPIC_SIFCR_H,
                .bit = 0,
                .prio_mask = 0,
        },
        [33] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_A,
                .force = IPIC_SIFCR_H,
                .bit = 1,
                .prio_mask = 1,
        },
        [34] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_A,
                .force = IPIC_SIFCR_H,
                .bit = 2,
                .prio_mask = 2,
        },
        [35] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_A,
                .force = IPIC_SIFCR_H,
                .bit = 3,
                .prio_mask = 3,
        },
        [36] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_A,
                .force = IPIC_SIFCR_H,
                .bit = 4,
                .prio_mask = 4,
        },
        [37] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_A,
                .force = IPIC_SIFCR_H,
                .bit = 5,
                .prio_mask = 5,
        },
        [38] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_A,
                .force = IPIC_SIFCR_H,
                .bit = 6,
                .prio_mask = 6,
        },
        [39] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_A,
                .force = IPIC_SIFCR_H,
                .bit = 7,
                .prio_mask = 7,
        },
        [40] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_B,
                .force = IPIC_SIFCR_H,
                .bit = 8,
                .prio_mask = 0,
        },
        [41] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_B,
                .force = IPIC_SIFCR_H,
                .bit = 9,
                .prio_mask = 1,
        },
        [42] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_B,
                .force = IPIC_SIFCR_H,
                .bit = 10,
                .prio_mask = 2,
        },
        [43] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_B,
                .force = IPIC_SIFCR_H,
                .bit = 11,
                .prio_mask = 3,
        },
        [44] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_B,
                .force = IPIC_SIFCR_H,
                .bit = 12,
                .prio_mask = 4,
        },
        [45] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_B,
                .force = IPIC_SIFCR_H,
                .bit = 13,
                .prio_mask = 5,
        },
        [46] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_B,
                .force = IPIC_SIFCR_H,
                .bit = 14,
                .prio_mask = 6,
        },
        [47] = {
                .mask = IPIC_SIMSR_H,
                .prio = IPIC_SIPRR_B,
                .force = IPIC_SIFCR_H,
                .bit = 15,
                .prio_mask = 7,
        },
        [48] = {
                .ack = IPIC_SEPNR,
                .mask = IPIC_SEMSR,
                .prio = IPIC_SMPRR_A,
                .force = IPIC_SEFCR,
                .bit = 0,
                .prio_mask = 4,
        },
        [64] = {
                .mask = IPIC_SIMSR_L,
                .prio = IPIC_SMPRR_A,
                .force = IPIC_SIFCR_L,
                .bit = 0,
                .prio_mask = 0,
        },
        [65] = {
                .mask = IPIC_SIMSR_L,
                .prio = IPIC_SMPRR_A,
                .force = IPIC_SIFCR_L,
                .bit = 1,
                .prio_mask = 1,
        },
        [66] = {
                .mask = IPIC_SIMSR_L,
                .prio = IPIC_SMPRR_A,
                .force = IPIC_SIFCR_L,
                .bit = 2,
                .prio_mask = 2,
        },
        [67] = {
                .mask = IPIC_SIMSR_L,
                .prio = IPIC_SMPRR_A,
                .force = IPIC_SIFCR_L,
                .bit = 3,
                .prio_mask = 3,
        },
        [68] = {
                .mask = IPIC_SIMSR_L,
                .prio = IPIC_SMPRR_B,
                .force = IPIC_SIFCR_L,
                .bit = 4,
                .prio_mask = 0,
        },
        [69] = {
                .mask = IPIC_SIMSR_L,
                .prio = IPIC_SMPRR_B,
                .force = IPIC_SIFCR_L,
                .bit = 5,
                .prio_mask = 1,
        },
        [70] = {
                .mask = IPIC_SIMSR_L,
                .prio = IPIC_SMPRR_B,
                .force = IPIC_SIFCR_L,
                .bit = 6,
                .prio_mask = 2,
        },
        [71] = {
                .mask = IPIC_SIMSR_L,
                .prio = IPIC_SMPRR_B,
                .force = IPIC_SIFCR_L,
                .bit = 7,
                .prio_mask = 3,
        },
        [72] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 8,
        },
        [73] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 9,
        },
        [74] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 10,
        },
        [75] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 11,
        },
        [76] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 12,
        },
        [77] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 13,
        },
        [78] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 14,
        },
        [79] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 15,
        },
        [80] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 16,
        },
        [81] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 17,
        },
        [82] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 18,
        },
        [83] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 19,
        },
        [84] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 20,
        },
        [85] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 21,
        },
        [86] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 22,
        },
        [87] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 23,
        },
        [88] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 24,
        },
        [89] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 25,
        },
        [90] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 26,
        },
        [91] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 27,
        },
        [94] = {
                .mask = IPIC_SIMSR_L,
                .prio = 0,
                .force = IPIC_SIFCR_L,
                .bit = 30,
        },
};
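
/*
 * Register accessors: IPIC registers are 32 bits wide and big-endian;
 * @reg is a byte offset, converted here to a word index into the
 * ioremapped register block.
 */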
static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
{
        return in_be32(base + (reg >> 2));
}

static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
{
        out_be32(base + (reg >> 2), value);
}

static inline struct ipic *ipic_from_irq(unsigned int virq)
{
        return primary_ipic;
}
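
/*
 * Mask, unmask and ack handlers.  Each one takes the global ipic_lock and
 * sets or clears the source's bit in the mask (SIMSR/SEMSR) or pending
 * (SEPNR) register, as described by ipic_info[].
 */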
static void ipic_unmask_irq(struct irq_data *d)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&ipic_lock, flags);

        temp = ipic_read(ipic->regs, ipic_info[src].mask);
        temp |= (1 << (31 - ipic_info[src].bit));
        ipic_write(ipic->regs, ipic_info[src].mask, temp);

        raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq(struct irq_data *d)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&ipic_lock, flags);

        temp = ipic_read(ipic->regs, ipic_info[src].mask);
        temp &= ~(1 << (31 - ipic_info[src].bit));
        ipic_write(ipic->regs, ipic_info[src].mask, temp);

        /* mb() can't guarantee that masking is finished. But it does finish
         * for nearly all cases. */
        mb();

        raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_ack_irq(struct irq_data *d)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&ipic_lock, flags);

        temp = 1 << (31 - ipic_info[src].bit);
        ipic_write(ipic->regs, ipic_info[src].ack, temp);

        /* mb() can't guarantee that ack is finished. But it does finish
         * for nearly all cases. */
        mb();

        raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq_and_ack(struct irq_data *d)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&ipic_lock, flags);

        temp = ipic_read(ipic->regs, ipic_info[src].mask);
        temp &= ~(1 << (31 - ipic_info[src].bit));
        ipic_write(ipic->regs, ipic_info[src].mask, temp);

        temp = 1 << (31 - ipic_info[src].bit);
        ipic_write(ipic->regs, ipic_info[src].ack, temp);

        /* mb() can't guarantee that ack is finished. But it does finish
         * for nearly all cases. */
        mb();

        raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
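
/*
 * Trigger-type handling: the IPIC supports only active-low level and
 * falling-edge senses, and edge mode only on the external sources (those
 * with an ack register).  Changing the type swaps between the level and
 * edge irq_chips and, for external IRQs, programs the corresponding
 * edge-detect bit in SECNR.
 */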
static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned int vold, vnew, edibit;

        if (flow_type == IRQ_TYPE_NONE)
                flow_type = IRQ_TYPE_LEVEL_LOW;

        /* ipic supports only low assertion and high-to-low change senses
         */
        if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
                printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
                       flow_type);
                return -EINVAL;
        }
        /* ipic supports only edge mode on external interrupts */
        if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
                printk(KERN_ERR "ipic: edge sense not supported on internal interrupts\n");
                return -EINVAL;
        }

        irqd_set_trigger_type(d, flow_type);
        if (flow_type & IRQ_TYPE_LEVEL_LOW) {
                irq_set_handler_locked(d, handle_level_irq);
                d->chip = &ipic_level_irq_chip;
        } else {
                irq_set_handler_locked(d, handle_edge_irq);
                d->chip = &ipic_edge_irq_chip;
        }

        /* only EXT IRQ senses are programmable on ipic
         * internal IRQ senses are LEVEL_LOW
         */
        if (src == IPIC_IRQ_EXT0)
                edibit = 15;
        else if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
                edibit = (14 - (src - IPIC_IRQ_EXT1));
        else
                return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;

        vold = ipic_read(ipic->regs, IPIC_SECNR);
        if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING)
                vnew = vold | (1 << edibit);
        else
                vnew = vold & ~(1 << edibit);

        if (vold != vnew)
                ipic_write(ipic->regs, IPIC_SECNR, vnew);
        return IRQ_SET_MASK_OK_NOCOPY;
}

/* level interrupts and edge interrupts have different ack operations */
static struct irq_chip ipic_level_irq_chip = {
        .name = "IPIC",
        .irq_unmask = ipic_unmask_irq,
        .irq_mask = ipic_mask_irq,
        .irq_mask_ack = ipic_mask_irq,
        .irq_set_type = ipic_set_irq_type,
};

static struct irq_chip ipic_edge_irq_chip = {
        .name = "IPIC",
        .irq_unmask = ipic_unmask_irq,
        .irq_mask = ipic_mask_irq,
        .irq_mask_ack = ipic_mask_irq_and_ack,
        .irq_ack = ipic_ack_irq,
        .irq_set_type = ipic_set_irq_type,
};
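
/*
 * irq_domain callbacks: match on the IPIC device tree node and install the
 * level chip and handler as the default for newly mapped interrupts; the
 * actual trigger type is applied afterwards through irq_set_irq_type().
 */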
static int ipic_host_match(struct irq_domain *h, struct device_node *node,
                           enum irq_domain_bus_token bus_token)
{
        /* Exact match, unless ipic node is NULL */
        struct device_node *of_node = irq_domain_get_of_node(h);
        return of_node == NULL || of_node == node;
}

static int ipic_host_map(struct irq_domain *h, unsigned int virq,
                         irq_hw_number_t hw)
{
        struct ipic *ipic = h->host_data;

        irq_set_chip_data(virq, ipic);
        irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);

        /* Set default irq type */
        irq_set_irq_type(virq, IRQ_TYPE_NONE);

        return 0;
}

static const struct irq_domain_ops ipic_host_ops = {
        .match = ipic_host_match,
        .map = ipic_host_map,
        .xlate = irq_domain_xlate_onetwocell,
};
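
/*
 * ipic_init() maps the register block described by the device tree node,
 * creates a linear irq_domain for the NR_IPIC_INTS sources, applies the
 * spread-mode and MCP routing options passed in @flags, masks all internal
 * sources and installs this IPIC as the default interrupt host.
 */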
struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
{
        struct ipic *ipic;
        struct resource res;
        u32 temp = 0, ret;

        ret = of_address_to_resource(node, 0, &res);
        if (ret)
                return NULL;

        ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
        if (ipic == NULL)
                return NULL;

        ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
                                              &ipic_host_ops, ipic);
        if (ipic->irqhost == NULL) {
                kfree(ipic);
                return NULL;
        }

        ipic->regs = ioremap(res.start, resource_size(&res));

        /* init hw */
        ipic_write(ipic->regs, IPIC_SICNR, 0x0);

        /* default priority scheme is grouped. If spread mode is required
         * configure SICFR accordingly */
        if (flags & IPIC_SPREADMODE_GRP_A)
                temp |= SICFR_IPSA;
        if (flags & IPIC_SPREADMODE_GRP_B)
                temp |= SICFR_IPSB;
        if (flags & IPIC_SPREADMODE_GRP_C)
                temp |= SICFR_IPSC;
        if (flags & IPIC_SPREADMODE_GRP_D)
                temp |= SICFR_IPSD;
        if (flags & IPIC_SPREADMODE_MIX_A)
                temp |= SICFR_MPSA;
        if (flags & IPIC_SPREADMODE_MIX_B)
                temp |= SICFR_MPSB;

        ipic_write(ipic->regs, IPIC_SICFR, temp);

        /* handle MCP route */
        temp = 0;
        if (flags & IPIC_DISABLE_MCP_OUT)
                temp = SERCR_MCPR;
        ipic_write(ipic->regs, IPIC_SERCR, temp);

        /* handle routing of IRQ0 to MCP */
        temp = ipic_read(ipic->regs, IPIC_SEMSR);

        if (flags & IPIC_IRQ0_MCP)
                temp |= SEMSR_SIRQ0;
        else
                temp &= ~SEMSR_SIRQ0;

        ipic_write(ipic->regs, IPIC_SEMSR, temp);

        primary_ipic = ipic;
        irq_set_default_host(primary_ipic->irqhost);

        ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
        ipic_write(ipic->regs, IPIC_SIMSR_L, 0);

        printk("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
               primary_ipic->regs);

        return ipic;
}
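
/*
 * Program every internal (SIPRR_A..D) and mixed (SMPRR_A/B) priority
 * register with the default priority encoding.
 */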
void ipic_set_default_priority(void)
{
        ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
}

u32 ipic_get_mcp_status(void)
{
        return primary_ipic ? ipic_read(primary_ipic->regs, IPIC_SERSR) : 0;
}

void ipic_clear_mcp_status(u32 mask)
{
        ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
}

/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ipic_get_irq(void)
{
        int irq;

        BUG_ON(primary_ipic == NULL);

#define IPIC_SIVCR_VECTOR_MASK 0x7f
        irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;

        if (irq == 0)   /* 0 --> no irq is pending */
                return 0;

        return irq_linear_revmap(primary_ipic->irqhost, irq);
}
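
/*
 * Suspend/resume support: the full IPIC register state is saved at suspend
 * and written back at resume through syscore operations.  For deep sleep
 * every interrupt source is masked first, since pending interrupts can
 * cause problems on 831x parts.
 */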
#ifdef CONFIG_SUSPEND
static struct {
        u32 sicfr;
        u32 siprr[2];
        u32 simsr[2];
        u32 sicnr;
        u32 smprr[2];
        u32 semsr;
        u32 secnr;
        u32 sermr;
        u32 sercr;
} ipic_saved_state;

static int ipic_suspend(void)
{
        struct ipic *ipic = primary_ipic;

        ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
        ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
        ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
        ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
        ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
        ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
        ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
        ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
        ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
        ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
        ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
        ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);

        if (fsl_deep_sleep()) {
                /* In deep sleep, make sure there can be no
                 * pending interrupts, as this can cause
                 * problems on 831x.
                 */
                ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
                ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
                ipic_write(ipic->regs, IPIC_SEMSR, 0);
                ipic_write(ipic->regs, IPIC_SERMR, 0);
        }

        return 0;
}

static void ipic_resume(void)
{
        struct ipic *ipic = primary_ipic;

        ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
        ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
        ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
        ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
        ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
        ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
        ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
        ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
        ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
        ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
        ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
        ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
}
#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif

static struct syscore_ops ipic_syscore_ops = {
        .suspend = ipic_suspend,
        .resume = ipic_resume,
};

static int __init init_ipic_syscore(void)
{
        if (!primary_ipic || !primary_ipic->regs)
                return -ENODEV;

        printk(KERN_DEBUG "Registering ipic system core operations\n");
        register_syscore_ops(&ipic_syscore_ops);

        return 0;
}

subsys_initcall(init_ipic_syscore);