/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/ipic.h>

#include "ipic.h"
static struct ipic *primary_ipic;
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
static DEFINE_RAW_SPINLOCK(ipic_lock);
static struct ipic_info ipic_info[] = {
	.force	= IPIC_SIFCR_H,

	.force	= IPIC_SIFCR_H,

	.force	= IPIC_SIFCR_H,

	.force	= IPIC_SIFCR_H,

	.force	= IPIC_SIFCR_H,

	.force	= IPIC_SIFCR_H,

	.force	= IPIC_SIFCR_H,

	.force	= IPIC_SIFCR_H,

	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_D,
	.force	= IPIC_SIFCR_H,

	.prio	= IPIC_SMPRR_A,

	.prio	= IPIC_SMPRR_A,

	.prio	= IPIC_SMPRR_A,

	.prio	= IPIC_SMPRR_B,

	.prio	= IPIC_SMPRR_B,

	.prio	= IPIC_SMPRR_B,

	.prio	= IPIC_SMPRR_B,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_A,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,

	.mask	= IPIC_SIMSR_H,
	.prio	= IPIC_SIPRR_B,
	.force	= IPIC_SIFCR_H,

	.prio	= IPIC_SMPRR_A,

	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_A,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_A,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_A,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_A,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_B,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_B,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_B,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.prio	= IPIC_SMPRR_B,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,

	.mask	= IPIC_SIMSR_L,
	.force	= IPIC_SIFCR_L,
};
static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}
static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
{
	out_be32(base + (reg >> 2), value);
}
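
/*
 * The IPIC register map is described with byte offsets; since "base" is a
 * u32 pointer, the accessors above shift the offset right by two to index
 * in 32-bit words before handing it to the big-endian MMIO helpers.
 */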
static inline struct ipic *ipic_from_irq(unsigned int virq)
{
	return primary_ipic;
}
static void ipic_unmask_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp |= (1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_mask_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	/* mb() can't guarantee that masking is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_ack_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_mask_irq_and_ack(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned int vold, vnew, edibit;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* ipic supports only low assertion and high-to-low change senses
	 */
	if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
		printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
			flow_type);
		return -EINVAL;
	}
	/* ipic supports only edge mode on external interrupts */
	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
		printk(KERN_ERR "ipic: edge sense not supported on internal "
				"interrupts\n");
		return -EINVAL;
	}

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & IRQ_TYPE_LEVEL_LOW) {
		__irq_set_handler_locked(d->irq, handle_level_irq);
		d->chip = &ipic_level_irq_chip;
	} else {
		__irq_set_handler_locked(d->irq, handle_edge_irq);
		d->chip = &ipic_edge_irq_chip;
	}

	/* only EXT IRQ senses are programmable on ipic
	 * internal IRQ senses are LEVEL_LOW
	 */
	if (src == IPIC_IRQ_EXT0)
		edibit = 15;
	else if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
		edibit = (14 - (src - IPIC_IRQ_EXT1));
	else
		return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;

	vold = ipic_read(ipic->regs, IPIC_SECNR);
	if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
		vnew = vold | (1 << edibit);
	} else {
		vnew = vold & ~(1 << edibit);
	}
	if (vold != vnew)
		ipic_write(ipic->regs, IPIC_SECNR, vnew);
	return IRQ_SET_MASK_OK_NOCOPY;
}
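
/*
 * Note on the SECNR edge-control bits computed above: external IRQ0 uses
 * bit 15 and IRQ1..IRQ7 use bits 14..8 (edibit = 14 - (src - IPIC_IRQ_EXT1)),
 * so only the eight external sources have a programmable sense.
 */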
/* level interrupts and edge interrupts have different ack operations */
static struct irq_chip ipic_level_irq_chip = {
	.name		= "IPIC",
	.irq_unmask	= ipic_unmask_irq,
	.irq_mask	= ipic_mask_irq,
	.irq_mask_ack	= ipic_mask_irq,
	.irq_set_type	= ipic_set_irq_type,
};

static struct irq_chip ipic_edge_irq_chip = {
	.name		= "IPIC",
	.irq_unmask	= ipic_unmask_irq,
	.irq_mask	= ipic_mask_irq,
	.irq_mask_ack	= ipic_mask_irq_and_ack,
	.irq_ack	= ipic_ack_irq,
	.irq_set_type	= ipic_set_irq_type,
};
static int ipic_host_match(struct irq_domain *h, struct device_node *node)
{
	/* Exact match, unless ipic node is NULL */
	return h->of_node == NULL || h->of_node == node;
}
static int ipic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct ipic *ipic = h->host_data;

	irq_set_chip_data(virq, ipic);
	irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}
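
/*
 * Newly mapped interrupts default to the level/low chip and handler; if a
 * consumer later requests IRQ_TYPE_EDGE_FALLING, ipic_set_irq_type() switches
 * the descriptor over to ipic_edge_irq_chip and handle_edge_irq.
 */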
static struct irq_domain_ops ipic_host_ops = {
	.match	= ipic_host_match,
	.map	= ipic_host_map,
	.xlate	= irq_domain_xlate_onetwocell,
};
struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
{
	struct ipic	*ipic;
	struct resource res;
	u32 temp = 0, ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return NULL;

	ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
	if (ipic == NULL)
		return NULL;

	ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
					      &ipic_host_ops, ipic);
	if (ipic->irqhost == NULL) {
		kfree(ipic);
		return NULL;
	}

	ipic->regs = ioremap(res.start, resource_size(&res));

	/* init hw */
	ipic_write(ipic->regs, IPIC_SICNR, 0x0);

	/* default priority scheme is grouped. If spread mode is required
	 * configure SICFR accordingly */
	if (flags & IPIC_SPREADMODE_GRP_A)
		temp |= SICFR_IPSA;
	if (flags & IPIC_SPREADMODE_GRP_B)
		temp |= SICFR_IPSB;
	if (flags & IPIC_SPREADMODE_GRP_C)
		temp |= SICFR_IPSC;
	if (flags & IPIC_SPREADMODE_GRP_D)
		temp |= SICFR_IPSD;
	if (flags & IPIC_SPREADMODE_MIX_A)
		temp |= SICFR_MPSA;
	if (flags & IPIC_SPREADMODE_MIX_B)
		temp |= SICFR_MPSB;

	ipic_write(ipic->regs, IPIC_SICFR, temp);

	/* handle MCP route */
	temp = 0;
	if (flags & IPIC_DISABLE_MCP_OUT)
		temp = SERCR_MCPR;
	ipic_write(ipic->regs, IPIC_SERCR, temp);

	/* handle routing of IRQ0 to MCP */
	temp = ipic_read(ipic->regs, IPIC_SEMSR);

	if (flags & IPIC_IRQ0_MCP)
		temp |= SEMSR_SIRQ0;
	else
		temp &= ~SEMSR_SIRQ0;

	ipic_write(ipic->regs, IPIC_SEMSR, temp);

	primary_ipic = ipic;
	irq_set_default_host(primary_ipic->irqhost);

	ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
	ipic_write(ipic->regs, IPIC_SIMSR_L, 0);

	printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
			primary_ipic->regs);

	return ipic;
}
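
/*
 * Illustrative board-level usage (a sketch, not part of this file): a
 * platform's init_IRQ hook typically looks up the IPIC node, calls
 * ipic_init() followed by ipic_set_default_priority(), and wires
 * ipic_get_irq() up as the machine's get_irq callback.  The hook name and
 * node lookup below are assumptions; they vary by board port.
 *
 *	static void __init board_ipic_init_IRQ(void)
 *	{
 *		struct device_node *np;
 *
 *		np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
 *		if (!np)
 *			return;
 *
 *		ipic_init(np, 0);
 *		of_node_put(np);
 *		ipic_set_default_priority();
 *	}
 */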
int ipic_set_priority(unsigned int virq, unsigned int priority)
{
	struct ipic *ipic = ipic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	if (ipic_info[src].prio == 0)
		return -EINVAL;

	temp = ipic_read(ipic->regs, ipic_info[src].prio);

	if (priority < 4) {
		temp &= ~(0x7 << (20 + (3 - priority) * 3));
		temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
	} else {
		temp &= ~(0x7 << (4 + (7 - priority) * 3));
		temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
	}

	ipic_write(ipic->regs, ipic_info[src].prio, temp);

	return 0;
}
void ipic_set_highest_priority(unsigned int virq)
{
	struct ipic *ipic = ipic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	temp = ipic_read(ipic->regs, IPIC_SICFR);

	/* clear and set HPI */
	temp |= (src & 0x7f) << 24;

	ipic_write(ipic->regs, IPIC_SICFR, temp);
}
void ipic_set_default_priority(void)
{
	ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
}
void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
{
	struct ipic *ipic = primary_ipic;
	u32 temp;

	temp = ipic_read(ipic->regs, IPIC_SERMR);
	temp |= (1 << (31 - mcp_irq));
	ipic_write(ipic->regs, IPIC_SERMR, temp);
}
void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
{
	struct ipic *ipic = primary_ipic;
	u32 temp;

	temp = ipic_read(ipic->regs, IPIC_SERMR);
	temp &= ~(1 << (31 - mcp_irq));
	ipic_write(ipic->regs, IPIC_SERMR, temp);
}
u32 ipic_get_mcp_status(void)
{
	return ipic_read(primary_ipic->regs, IPIC_SERMR);
}
void ipic_clear_mcp_status(u32 mask)
{
	ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
}
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int ipic_get_irq(void)
{
	int irq;

	BUG_ON(primary_ipic == NULL);

#define IPIC_SIVCR_VECTOR_MASK	0x7f
	irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;

	if (irq == 0)    /* 0 --> no irq is pending */
		return NO_IRQ;

	return irq_linear_revmap(primary_ipic->irqhost, irq);
}
#ifdef CONFIG_SUSPEND
static struct {
	u32 sicfr;
	u32 siprr[2];
	u32 simsr[2];
	u32 sicnr;
	u32 smprr[2];
	u32 semsr;
	u32 secnr;
	u32 sermr;
	u32 sercr;
} ipic_saved_state;

static int ipic_suspend(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
	ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
	ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
	ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
	ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
	ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
	ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
	ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
	ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
	ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
	ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
	ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);

	if (fsl_deep_sleep()) {
		/* In deep sleep, make sure there can be no
		 * pending interrupts, as this can cause
		 * problems on 831x.
		 */
		ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
		ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
		ipic_write(ipic->regs, IPIC_SEMSR, 0);
		ipic_write(ipic->regs, IPIC_SERMR, 0);
	}

	return 0;
}
static void ipic_resume(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
	ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
	ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
	ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
	ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
	ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
	ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
	ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
	ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
	ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
	ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
	ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
}
#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif
static struct syscore_ops ipic_syscore_ops = {
	.suspend = ipic_suspend,
	.resume = ipic_resume,
};
static int __init init_ipic_syscore(void)
{
	if (!primary_ipic || !primary_ipic->regs)
		return -ENODEV;

	printk(KERN_DEBUG "Registering ipic system core operations\n");
	register_syscore_ops(&ipic_syscore_ops);

	return 0;
}

subsys_initcall(init_ipic_syscore);