1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 */
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/errno.h>
12 #include <linux/reboot.h>
13 #include <linux/slab.h>
14 #include <linux/stddef.h>
15 #include <linux/sched.h>
16 #include <linux/signal.h>
17 #include <linux/syscore_ops.h>
18 #include <linux/device.h>
19 #include <linux/spinlock.h>
20 #include <linux/fsl_devices.h>
/* The single system IPIC; recorded during ipic_init() and used by all the
 * primary_* accessors below.  NOTE(review): the assignment site is in
 * ipic_init(), whose body is partly elided in this extraction. */
static struct ipic *primary_ipic;

/* Two chip flavours: level-sensitive and edge-sensitive sources need
 * different mask/ack sequencing (see the chip definitions further down). */
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;

/* Serializes read-modify-write sequences on the memory-mapped IPIC regs. */
static DEFINE_RAW_SPINLOCK(ipic_lock);
/*
 * Per-hwirq register layout table: for each interrupt source, which mask
 * and force registers serve it and which priority register ranks it.
 *
 * NOTE(review): this table is truncated by extraction — upstream has one
 * designated entry per hwirq ("[N] = { ... },") carrying .ack, .bit and
 * .prio_mask initializers as well; only the surviving .mask/.prio/.force
 * initializers are reproduced below, in their original order, and the
 * closing "};" is also missing.  Restore from the reference source.
 */
static struct ipic_info ipic_info[] = {
	/* Error-class sources: forceable via SIFCR_H only. */
	.force	= IPIC_SIFCR_H,
	.force	= IPIC_SIFCR_H,
	.force	= IPIC_SIFCR_H,
	.force	= IPIC_SIFCR_H,
	.force	= IPIC_SIFCR_H,
	.force	= IPIC_SIFCR_H,
	.force	= IPIC_SIFCR_H,
	.force	= IPIC_SIFCR_H,
	.force	= IPIC_SIFCR_H,
	.force	= IPIC_SIFCR_H,
	/* Internal sources masked in SIMSR_H, prioritized in SIPRR_D. */
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,
	/* Mixed-group sources prioritized in SMPRR_A. */
	.prio	= IPIC_SMPRR_A,
	.prio	= IPIC_SMPRR_A,
	.prio	= IPIC_SMPRR_A,
	/* Mixed-group sources prioritized in SMPRR_B. */
	.prio	= IPIC_SMPRR_B,
	.prio	= IPIC_SMPRR_B,
	.prio	= IPIC_SMPRR_B,
	.prio	= IPIC_SMPRR_B,
	/* Sources masked in SIMSR_H, prioritized in SIPRR_A. */
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,
	/* Sources masked in SIMSR_H, prioritized in SIPRR_B. */
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,
	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,
	/* Single SMPRR_A-prioritized source. */
	.prio	= IPIC_SMPRR_A,
	/* Low-word sources masked in SIMSR_L, prioritized in SMPRR_A. */
	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_A,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_A,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_A,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_A,
	.force	= IPIC_SIFCR_L,
	/* Low-word sources masked in SIMSR_L, prioritized in SMPRR_B. */
	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_B,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_B,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_B,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_B,
	.force	= IPIC_SIFCR_L,
	/* Remaining low-word internal sources: mask + force only
	 * (fixed priority; no .prio register). */
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
505 static inline u32
ipic_read(volatile u32 __iomem
*base
, unsigned int reg
)
507 return in_be32(base
+ (reg
>> 2));
510 static inline void ipic_write(volatile u32 __iomem
*base
, unsigned int reg
, u32 value
)
512 out_be32(base
+ (reg
>> 2), value
);
/* Map a Linux virq back to its owning IPIC instance.
 * NOTE(review): the body was lost in extraction — there is only one IPIC
 * in the system, so presumably this returns primary_ipic; confirm against
 * the reference source. */
static inline struct ipic *ipic_from_irq(unsigned int virq)
/*
 * irq_chip .irq_unmask: set the source's bit in its SIMSR mask register so
 * the interrupt can be delivered again.
 * NOTE(review): the local declarations ("u32 temp;" / "unsigned long
 * flags;") and the function braces were lost in extraction.
 */
static void ipic_unmask_irq(struct irq_data *d)
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&ipic_lock, flags);

	/* Read-modify-write under the lock; bit 0 is the MSB of the
	 * big-endian register, hence the "31 -" flip. */
	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp |= (1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
/*
 * irq_chip .irq_mask: clear the source's bit in its SIMSR mask register to
 * inhibit delivery.
 * NOTE(review): the local declarations, function braces and the trailing
 * "mb();" the comment below refers to were lost in extraction.
 */
static void ipic_mask_irq(struct irq_data *d)
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	/* mb() can't guarantee that masking is finished. But it does finish
	 * for nearly all cases. */
	raw_spin_unlock_irqrestore(&ipic_lock, flags);
<imports></imports>
/*
 * irq_chip .irq_ack: write the source's bit to its ack (pending) register
 * to clear a latched edge event.
 * NOTE(review): local declarations, braces and the "mb();" mentioned in
 * the comment below were lost in extraction.
 */
static void ipic_ack_irq(struct irq_data *d)
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&ipic_lock, flags);

	/* Ack registers are write-1-to-clear: no read-modify-write needed. */
	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	raw_spin_unlock_irqrestore(&ipic_lock, flags);
/*
 * irq_chip .irq_mask_ack for edge sources: mask the source and then clear
 * any latched event, all inside one lock hold so no window exists where a
 * new edge could be recorded between the two writes.
 * NOTE(review): local declarations, braces and the "mb();" mentioned in
 * the comment below were lost in extraction.
 */
static void ipic_mask_irq_and_ack(struct irq_data *d)
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&ipic_lock, flags);

	/* First mask ... */
	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	/* ... then ack (write-1-to-clear). */
	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	raw_spin_unlock_irqrestore(&ipic_lock, flags);
/*
 * irq_chip .irq_set_type: select level-low or falling-edge sense for a
 * source, switching the flow handler and chip flavour to match.  Only the
 * external IRQs (EXT0..EXT7) are sense-programmable via SECNR; internal
 * sources are fixed level-low.
 * NOTE(review): several lines were lost in extraction — function braces,
 * the "return -EINVAL;" after each printk, the "else" keywords of the
 * level/edge and EXT0/EXT1..7 branches, and "edibit = 15;" for EXT0.
 */
static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned int vold, vnew, edibit;

	/* Default (unspecified) sense is level-low. */
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* ipic supports only low assertion and high-to-low change senses
	 */
	if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
		printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
	/* ipic supports only edge mode on external interrupts */
	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
		printk(KERN_ERR "ipic: edge sense not supported on internal "

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & IRQ_TYPE_LEVEL_LOW) {
		irq_set_handler_locked(d, handle_level_irq);
		d->chip = &ipic_level_irq_chip;
		irq_set_handler_locked(d, handle_edge_irq);
		d->chip = &ipic_edge_irq_chip;

	/* only EXT IRQ senses are programmable on ipic
	 * internal IRQ senses are LEVEL_LOW
	 */
	if (src == IPIC_IRQ_EXT0)
	if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
		edibit = (14 - (src - IPIC_IRQ_EXT1));
		return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;

	/* Program the edge-detect bit for the chosen external IRQ. */
	vold = ipic_read(ipic->regs, IPIC_SECNR);
	if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
		vnew = vold | (1 << edibit);
		vnew = vold & ~(1 << edibit);
	ipic_write(ipic->regs, IPIC_SECNR, vnew);
	/* Trigger type already stored above, so no copy-back is needed. */
	return IRQ_SET_MASK_OK_NOCOPY;
/* level interrupts and edge interrupts have different ack operations */
/* NOTE(review): the ".name" initializer and the closing "};" were lost
 * in extraction. */
static struct irq_chip ipic_level_irq_chip = {
	.irq_unmask	= ipic_unmask_irq,
	.irq_mask	= ipic_mask_irq,
	.irq_mask_ack	= ipic_mask_irq,	/* level: masking alone suffices */
	.irq_set_type	= ipic_set_irq_type,
/* Edge chip: latched events must additionally be acked.
 * NOTE(review): the ".name" initializer and the closing "};" were lost
 * in extraction. */
static struct irq_chip ipic_edge_irq_chip = {
	.irq_unmask	= ipic_unmask_irq,
	.irq_mask	= ipic_mask_irq,
	.irq_mask_ack	= ipic_mask_irq_and_ack,	/* edge: mask + ack atomically */
	.irq_ack	= ipic_ack_irq,
	.irq_set_type	= ipic_set_irq_type,
671 static int ipic_host_match(struct irq_domain
*h
, struct device_node
*node
,
672 enum irq_domain_bus_token bus_token
)
674 /* Exact match, unless ipic node is NULL */
675 struct device_node
*of_node
= irq_domain_get_of_node(h
);
676 return of_node
== NULL
|| of_node
== node
;
/*
 * irq_domain .map: wire a freshly allocated virq to this IPIC with the
 * level chip/handler as the default; .irq_set_type switches to the edge
 * chip later if a driver requests edge sense.
 * NOTE(review): the remaining parameter(s) of the signature, the braces
 * and the final "return 0;" were lost in extraction.
 */
static int ipic_host_map(struct irq_domain *h, unsigned int virq,
	struct ipic *ipic = h->host_data;

	irq_set_chip_data(virq, ipic);
	irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);
/* irq_domain callbacks; onetwocell xlate accepts one- or two-cell OF
 * interrupt specifiers.  NOTE(review): closing "};" lost in extraction. */
static const struct irq_domain_ops ipic_host_ops = {
	.match	= ipic_host_match,
	.map	= ipic_host_map,
	.xlate	= irq_domain_xlate_onetwocell,
/*
 * ipic_init - map and configure the IPIC described by @node.
 * @node:  devicetree node of the interrupt controller
 * @flags: IPIC_SPREADMODE_* / IPIC_DISABLE_MCP_OUT / IPIC_IRQ0_MCP options
 *
 * Maps the register block, creates the linear irq_domain, programs the
 * spread-mode and MCP routing options, masks every source and records the
 * result as the primary (default) host.
 * NOTE(review): many lines were lost in extraction — the local
 * declarations (ret/res/ipic/temp), the error paths after each allocation
 * or mapping step, the SICFR/SERCR bit accumulation into "temp" inside the
 * flag tests below, and the return statements.
 */
struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
	ret = of_address_to_resource(node, 0, &res);

	ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);

	ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
					      &ipic_host_ops, ipic);
	if (ipic->irqhost == NULL) {

	ipic->regs = ioremap(res.start, resource_size(&res));

	/* Start from a clean control configuration. */
	ipic_write(ipic->regs, IPIC_SICNR, 0x0);

	/* default priority scheme is grouped. If spread mode is required
	 * configure SICFR accordingly */
	if (flags & IPIC_SPREADMODE_GRP_A)
	if (flags & IPIC_SPREADMODE_GRP_B)
	if (flags & IPIC_SPREADMODE_GRP_C)
	if (flags & IPIC_SPREADMODE_GRP_D)
	if (flags & IPIC_SPREADMODE_MIX_A)
	if (flags & IPIC_SPREADMODE_MIX_B)

	ipic_write(ipic->regs, IPIC_SICFR, temp);

	/* handle MCP route */
	if (flags & IPIC_DISABLE_MCP_OUT)
	ipic_write(ipic->regs, IPIC_SERCR, temp);

	/* handle routing of IRQ0 to MCP */
	temp = ipic_read(ipic->regs, IPIC_SEMSR);

	if (flags & IPIC_IRQ0_MCP)
	temp &= ~SEMSR_SIRQ0;

	ipic_write(ipic->regs, IPIC_SEMSR, temp);

	irq_set_default_host(primary_ipic->irqhost);

	/* Boot with every source masked; drivers unmask what they claim. */
	ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
	ipic_write(ipic->regs, IPIC_SIMSR_L, 0);

	printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
770 void ipic_set_default_priority(void)
772 ipic_write(primary_ipic
->regs
, IPIC_SIPRR_A
, IPIC_PRIORITY_DEFAULT
);
773 ipic_write(primary_ipic
->regs
, IPIC_SIPRR_B
, IPIC_PRIORITY_DEFAULT
);
774 ipic_write(primary_ipic
->regs
, IPIC_SIPRR_C
, IPIC_PRIORITY_DEFAULT
);
775 ipic_write(primary_ipic
->regs
, IPIC_SIPRR_D
, IPIC_PRIORITY_DEFAULT
);
776 ipic_write(primary_ipic
->regs
, IPIC_SMPRR_A
, IPIC_PRIORITY_DEFAULT
);
777 ipic_write(primary_ipic
->regs
, IPIC_SMPRR_B
, IPIC_PRIORITY_DEFAULT
);
780 u32
ipic_get_mcp_status(void)
782 return primary_ipic
? ipic_read(primary_ipic
->regs
, IPIC_SERSR
) : 0;
785 void ipic_clear_mcp_status(u32 mask
)
787 ipic_write(primary_ipic
->regs
, IPIC_SERSR
, mask
);
/* Return an interrupt vector or 0 if no interrupt is pending. */
/* NOTE(review): the local "int irq;" declaration, the function braces and
 * the early "return 0;" for the no-irq case were lost in extraction. */
unsigned int ipic_get_irq(void)
	BUG_ON(primary_ipic == NULL);

#define IPIC_SIVCR_VECTOR_MASK 0x7f
	/* SIVCR reports the highest-priority pending vector in its low bits. */
	irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;

	if (irq == 0) /* 0 --> no irq is pending */

	/* Translate the hardware vector into the Linux virq. */
	return irq_linear_revmap(primary_ipic->irqhost, irq);
#ifdef CONFIG_SUSPEND
/*
 * Syscore suspend: snapshot all IPIC configuration registers into
 * ipic_saved_state so ipic_resume() can restore them after a sleep state.
 * NOTE(review): the definition of ipic_saved_state (a struct holding
 * sicfr, siprr[2], simsr[2], sicnr, smprr[2], semsr, secnr, sermr and
 * sercr fields, judging by the accesses below), the function braces and
 * the final "return 0;" were lost in extraction.
 */
static int ipic_suspend(void)
	struct ipic *ipic = primary_ipic;

	ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
	ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
	ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
	ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
	ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
	ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
	ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
	ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
	ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
	ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
	ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
	ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);

	if (fsl_deep_sleep()) {
		/* In deep sleep, make sure there can be no
		 * pending interrupts, as this can cause
		 * problems on wakeup — so mask every source. */
		ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
		ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
		ipic_write(ipic->regs, IPIC_SEMSR, 0);
		ipic_write(ipic->regs, IPIC_SERMR, 0);
/*
 * Syscore resume: restore every register captured by ipic_suspend(), in
 * the same order it was saved (config first, then priorities, masks and
 * the external/error control registers).
 * NOTE(review): the function braces were lost in extraction.
 */
static void ipic_resume(void)
	struct ipic *ipic = primary_ipic;

	ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
	ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
	ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
	ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
	ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
	ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
	ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
	ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
	ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
	ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
	ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
	ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
/* Fallbacks when suspend support is compiled out: NULL hooks are skipped
 * by the syscore machinery.  NOTE(review): the "#else" preceding and the
 * "#endif" following these defines were lost in extraction. */
#define ipic_suspend NULL
#define ipic_resume NULL

/* Syscore hooks so the IPIC register state survives sleep transitions.
 * NOTE(review): closing "};" lost in extraction. */
static struct syscore_ops ipic_syscore_ops = {
	.suspend = ipic_suspend,
	.resume = ipic_resume,
/*
 * Late-init hook: register the IPIC syscore (suspend/resume) operations
 * once a primary IPIC with mapped registers exists.
 * NOTE(review): the function braces and the return statements (an error
 * return after the guard, and the final success return) were lost in
 * extraction.
 */
static int __init init_ipic_syscore(void)
	if (!primary_ipic || !primary_ipic->regs)

	printk(KERN_DEBUG "Registering ipic system core operations\n");
	register_syscore_ops(&ipic_syscore_ops);

subsys_initcall(init_ipic_syscore);