/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/ipic.h>

#include "ipic.h"

static struct ipic *primary_ipic;
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
static DEFINE_RAW_SPINLOCK(ipic_lock);

static struct ipic_info ipic_info[] = {
        { .force = IPIC_SIFCR_H, },
        { .force = IPIC_SIFCR_H, },
        { .force = IPIC_SIFCR_H, },
        { .force = IPIC_SIFCR_H, },
        { .force = IPIC_SIFCR_H, },
        { .force = IPIC_SIFCR_H, },
        { .force = IPIC_SIFCR_H, },
        { .force = IPIC_SIFCR_H, },
        { .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, },
        { .prio = IPIC_SMPRR_A, },
        { .prio = IPIC_SMPRR_A, },
        { .prio = IPIC_SMPRR_A, },
        { .prio = IPIC_SMPRR_B, },
        { .prio = IPIC_SMPRR_B, },
        { .prio = IPIC_SMPRR_B, },
        { .prio = IPIC_SMPRR_B, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, },
        { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, },
        { .prio = IPIC_SMPRR_A, },
        { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
        { .mask = IPIC_SIMSR_L, .force = IPIC_SIFCR_L, },
};
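
/*
 * ipic_info[] is indexed by the hardware interrupt source number.  Each
 * entry records the offsets of the registers that control that source
 * (mask, priority, force and, for external sources, ack) together with
 * its bit position and priority-field value; the helpers below look
 * these up instead of hard-coding per-source register math.
 */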

static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
{
        return in_be32(base + (reg >> 2));
}

static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
{
        out_be32(base + (reg >> 2), value);
}
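
/*
 * The IPIC_* register defines are byte offsets into the memory-mapped
 * register block.  Because the base pointer is a u32 pointer, the
 * accessors shift the offset right by two to turn it into a word index,
 * and use the big-endian MMIO helpers in_be32()/out_be32() for the
 * actual access.
 */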

static inline struct ipic *ipic_from_irq(unsigned int virq)
{
        return primary_ipic;
}
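
/*
 * IPIC registers number bits from the most-significant end: a source's
 * "bit" value counts down from bit 31, which is why the handlers below
 * use the 1 << (31 - ipic_info[src].bit) pattern.
 */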

static void ipic_unmask_irq(struct irq_data *d)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&ipic_lock, flags);

        temp = ipic_read(ipic->regs, ipic_info[src].mask);
        temp |= (1 << (31 - ipic_info[src].bit));
        ipic_write(ipic->regs, ipic_info[src].mask, temp);

        raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq(struct irq_data *d)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&ipic_lock, flags);

        temp = ipic_read(ipic->regs, ipic_info[src].mask);
        temp &= ~(1 << (31 - ipic_info[src].bit));
        ipic_write(ipic->regs, ipic_info[src].mask, temp);

        /* mb() can't guarantee that masking is finished. But it does finish
         * for nearly all cases. */
        mb();

        raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_ack_irq(struct irq_data *d)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&ipic_lock, flags);

        temp = 1 << (31 - ipic_info[src].bit);
        ipic_write(ipic->regs, ipic_info[src].ack, temp);

        /* mb() can't guarantee that ack is finished. But it does finish
         * for nearly all cases. */
        mb();

        raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq_and_ack(struct irq_data *d)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&ipic_lock, flags);

        temp = ipic_read(ipic->regs, ipic_info[src].mask);
        temp &= ~(1 << (31 - ipic_info[src].bit));
        ipic_write(ipic->regs, ipic_info[src].mask, temp);

        temp = 1 << (31 - ipic_info[src].bit);
        ipic_write(ipic->regs, ipic_info[src].ack, temp);

        /* mb() can't guarantee that ack is finished. But it does finish
         * for nearly all cases. */
        mb();

        raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
        struct ipic *ipic = ipic_from_irq(d->irq);
        unsigned int src = irqd_to_hwirq(d);
        unsigned int vold, vnew, edibit;

        if (flow_type == IRQ_TYPE_NONE)
                flow_type = IRQ_TYPE_LEVEL_LOW;

        /* ipic supports only low assertion and high-to-low change senses
         */
        if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
                printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
                        flow_type);
                return -EINVAL;
        }
        /* ipic supports only edge mode on external interrupts */
        if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
                printk(KERN_ERR "ipic: edge sense not supported on internal "
                                "interrupts\n");
                return -EINVAL;
        }

        irqd_set_trigger_type(d, flow_type);
        if (flow_type & IRQ_TYPE_LEVEL_LOW) {
                __irq_set_handler_locked(d->irq, handle_level_irq);
                d->chip = &ipic_level_irq_chip;
        } else {
                __irq_set_handler_locked(d->irq, handle_edge_irq);
                d->chip = &ipic_edge_irq_chip;
        }

        /* only EXT IRQ senses are programmable on ipic
         * internal IRQ senses are LEVEL_LOW
         */
        if (src == IPIC_IRQ_EXT0)
                edibit = 15;
        else if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
                edibit = (14 - (src - IPIC_IRQ_EXT1));
        else
                return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;

        vold = ipic_read(ipic->regs, IPIC_SECNR);
        if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
                vnew = vold | (1 << edibit);
        } else {
                vnew = vold & ~(1 << edibit);
        }
        ipic_write(ipic->regs, IPIC_SECNR, vnew);
        return IRQ_SET_MASK_OK_NOCOPY;
}
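
/*
 * Only the external sources have programmable sense: their edge-control
 * bits live in SECNR (see the edibit computation above, EXT1..EXT7 at
 * bits 14..8).  Internal sources are fixed level-low, which is why a
 * level-low request on them simply succeeds without a register write.
 */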

/* level interrupts and edge interrupts have different ack operations */
static struct irq_chip ipic_level_irq_chip = {
        .irq_unmask     = ipic_unmask_irq,
        .irq_mask       = ipic_mask_irq,
        .irq_mask_ack   = ipic_mask_irq,
        .irq_set_type   = ipic_set_irq_type,
};

static struct irq_chip ipic_edge_irq_chip = {
        .irq_unmask     = ipic_unmask_irq,
        .irq_mask       = ipic_mask_irq,
        .irq_mask_ack   = ipic_mask_irq_and_ack,
        .irq_ack        = ipic_ack_irq,
        .irq_set_type   = ipic_set_irq_type,
};

static int ipic_host_match(struct irq_host *h, struct device_node *node)
{
        /* Exact match, unless ipic node is NULL */
        return h->of_node == NULL || h->of_node == node;
}

static int ipic_host_map(struct irq_host *h, unsigned int virq,
                         irq_hw_number_t hw)
{
        struct ipic *ipic = h->host_data;

        irq_set_chip_data(virq, ipic);
        irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);

        /* Set default irq type */
        irq_set_irq_type(virq, IRQ_TYPE_NONE);

        return 0;
}
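
/*
 * Every mapped source starts out with the level chip and handle_level_irq;
 * ipic_set_irq_type() switches it to the edge chip and handle_edge_irq
 * when a falling-edge trigger is requested.
 */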

static int ipic_host_xlate(struct irq_host *h, struct device_node *ct,
                           const u32 *intspec, unsigned int intsize,
                           irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
        /* interrupt sense values coming from the device tree equal either
         * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change)
         */
        *out_hwirq = intspec[0];
        if (intsize > 1)
                *out_flags = intspec[1];
        else
                *out_flags = IRQ_TYPE_NONE;

        return 0;
}
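
/*
 * The specifier is therefore <source sense>.  An illustrative binding
 * (the source number is made up for the example):
 *
 *      interrupts = <18 0x8>;
 *
 * where 0x8 is IRQ_TYPE_LEVEL_LOW and 0x2 (IRQ_TYPE_EDGE_FALLING) would
 * request the falling-edge configuration instead.
 */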

static struct irq_host_ops ipic_host_ops = {
        .match  = ipic_host_match,
        .map    = ipic_host_map,
        .xlate  = ipic_host_xlate,
};

struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
{
        struct ipic *ipic;
        struct resource res;
        u32 temp = 0, ret;

        ret = of_address_to_resource(node, 0, &res);
        if (ret)
                return NULL;

        ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
        if (ipic == NULL)
                return NULL;

        ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
                                       NR_IPIC_INTS, &ipic_host_ops, 0);
        if (ipic->irqhost == NULL) {
                kfree(ipic);
                return NULL;
        }

        ipic->regs = ioremap(res.start, resource_size(&res));

        ipic->irqhost->host_data = ipic;

        /* init hw */
        ipic_write(ipic->regs, IPIC_SICNR, 0x0);

        /* default priority scheme is grouped. If spread mode is required
         * configure SICFR accordingly */
        if (flags & IPIC_SPREADMODE_GRP_A)
                temp |= SICFR_IPSA;
        if (flags & IPIC_SPREADMODE_GRP_B)
                temp |= SICFR_IPSB;
        if (flags & IPIC_SPREADMODE_GRP_C)
                temp |= SICFR_IPSC;
        if (flags & IPIC_SPREADMODE_GRP_D)
                temp |= SICFR_IPSD;
        if (flags & IPIC_SPREADMODE_MIX_A)
                temp |= SICFR_MPSA;
        if (flags & IPIC_SPREADMODE_MIX_B)
                temp |= SICFR_MPSB;

        ipic_write(ipic->regs, IPIC_SICFR, temp);

        /* handle MCP route */
        temp = 0;
        if (flags & IPIC_DISABLE_MCP_OUT)
                temp = SERCR_MCPR;
        ipic_write(ipic->regs, IPIC_SERCR, temp);

        /* handle routing of IRQ0 to MCP */
        temp = ipic_read(ipic->regs, IPIC_SEMSR);

        if (flags & IPIC_IRQ0_MCP)
                temp |= SEMSR_SIRQ0;
        else
                temp &= ~SEMSR_SIRQ0;

        ipic_write(ipic->regs, IPIC_SEMSR, temp);

        primary_ipic = ipic;
        irq_set_default_host(primary_ipic->irqhost);

        /* writing 0 to the mask registers leaves every source disabled */
        ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
        ipic_write(ipic->regs, IPIC_SIMSR_L, 0);

        printk("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
               primary_ipic->regs);

        return ipic;
}
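
/*
 * Typical board-setup usage (an illustrative sketch only; the node lookup
 * and the flags argument depend on the platform code):
 *
 *      struct device_node *np = of_find_node_by_type(NULL, "ipic");
 *
 *      if (np) {
 *              ipic_init(np, 0);
 *              of_node_put(np);
 *              ipic_set_default_priority();
 *      }
 */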

int ipic_set_priority(unsigned int virq, unsigned int priority)
{
        struct ipic *ipic = ipic_from_irq(virq);
        unsigned int src = virq_to_hw(virq);
        u32 temp;

        if (priority > 7)
                return -EINVAL;
        if (ipic_info[src].prio == 0)
                return -EINVAL;

        temp = ipic_read(ipic->regs, ipic_info[src].prio);

        if (priority < 4) {
                temp &= ~(0x7 << (20 + (3 - priority) * 3));
                temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
        } else {
                temp &= ~(0x7 << (4 + (7 - priority) * 3));
                temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
        }

        ipic_write(ipic->regs, ipic_info[src].prio, temp);

        return 0;
}
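
/*
 * Each priority register packs eight 3-bit position fields.  From the
 * shift math above, positions 0-3 occupy bits 31-20 and positions 4-7
 * occupy bits 15-4; the source's prio_mask value is written into the
 * selected field.
 */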

void ipic_set_highest_priority(unsigned int virq)
{
        struct ipic *ipic = ipic_from_irq(virq);
        unsigned int src = virq_to_hw(virq);
        u32 temp;

        temp = ipic_read(ipic->regs, IPIC_SICFR);

        /* clear and set HPI */
        temp &= ~(0x7f << 24);
        temp |= (src & 0x7f) << 24;

        ipic_write(ipic->regs, IPIC_SICFR, temp);
}

void ipic_set_default_priority(void)
{
        ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
        ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
}

void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
{
        struct ipic *ipic = primary_ipic;
        u32 temp;

        temp = ipic_read(ipic->regs, IPIC_SERMR);
        temp |= (1 << (31 - mcp_irq));
        ipic_write(ipic->regs, IPIC_SERMR, temp);
}

void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
{
        struct ipic *ipic = primary_ipic;
        u32 temp;

        temp = ipic_read(ipic->regs, IPIC_SERMR);
        temp &= ~(1 << (31 - mcp_irq));
        ipic_write(ipic->regs, IPIC_SERMR, temp);
}

u32 ipic_get_mcp_status(void)
{
        return ipic_read(primary_ipic->regs, IPIC_SERMR);
}

void ipic_clear_mcp_status(u32 mask)
{
        ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
}

/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int ipic_get_irq(void)
{
        int irq;

        BUG_ON(primary_ipic == NULL);

#define IPIC_SIVCR_VECTOR_MASK 0x7f
        irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;

        if (irq == 0)   /* 0 --> no irq is pending */
                return NO_IRQ;

        return irq_linear_revmap(primary_ipic->irqhost, irq);
}
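
/*
 * Platforms using the IPIC typically install this as the machine
 * get_irq hook (ppc_md.get_irq = ipic_get_irq), so it is what the core
 * interrupt code calls on every external interrupt exception.
 */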

#ifdef CONFIG_SUSPEND
static struct {
        u32 sicfr;
        u32 siprr[2];
        u32 simsr[2];
        u32 sicnr;
        u32 smprr[2];
        u32 semsr;
        u32 secnr;
        u32 sermr;
        u32 sercr;
} ipic_saved_state;

static int ipic_suspend(void)
{
        struct ipic *ipic = primary_ipic;

        ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
        ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
        ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
        ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
        ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
        ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
        ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
        ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
        ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
        ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
        ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
        ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);

        if (fsl_deep_sleep()) {
                /* In deep sleep, make sure there can be no
                 * pending interrupts, as this can cause problems. */
                ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
                ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
                ipic_write(ipic->regs, IPIC_SEMSR, 0);
                ipic_write(ipic->regs, IPIC_SERMR, 0);
        }

        return 0;
}

static void ipic_resume(void)
{
        struct ipic *ipic = primary_ipic;

        ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
        ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
        ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
        ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
        ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
        ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
        ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
        ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
        ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
        ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
        ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
        ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
}

#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif

static struct syscore_ops ipic_syscore_ops = {
        .suspend = ipic_suspend,
        .resume = ipic_resume,
};

static int __init init_ipic_syscore(void)
{
        if (!primary_ipic || !primary_ipic->regs)
                return -ENODEV;

        printk(KERN_DEBUG "Registering ipic system core operations\n");
        register_syscore_ops(&ipic_syscore_ops);

        return 0;
}

subsys_initcall(init_ipic_syscore);