/* arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c */
/*
 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
 *
 * This file define the irq handler for MSP CIC subsystem interrupts.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
12 #include <linux/init.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/bitops.h>
16 #include <linux/irq.h>
18 #include <asm/mipsregs.h>
19 #include <asm/system.h>
21 #include <msp_cic_int.h>
22 #include <msp_regs.h>
25 * External API
27 extern void msp_per_irq_init(void);
28 extern void msp_per_irq_dispatch(void);
32 * Convenience Macro. Should be somewhere generic.
34 #define get_current_vpe() \
35 ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
37 #ifdef CONFIG_SMP
39 #define LOCK_VPE(flags, mtflags) \
40 do { \
41 local_irq_save(flags); \
42 mtflags = dmt(); \
43 } while (0)
45 #define UNLOCK_VPE(flags, mtflags) \
46 do { \
47 emt(mtflags); \
48 local_irq_restore(flags);\
49 } while (0)
51 #define LOCK_CORE(flags, mtflags) \
52 do { \
53 local_irq_save(flags); \
54 mtflags = dvpe(); \
55 } while (0)
57 #define UNLOCK_CORE(flags, mtflags) \
58 do { \
59 evpe(mtflags); \
60 local_irq_restore(flags);\
61 } while (0)
63 #else
65 #define LOCK_VPE(flags, mtflags)
66 #define UNLOCK_VPE(flags, mtflags)
67 #endif
69 /* ensure writes to cic are completed */
70 static inline void cic_wmb(void)
72 const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
73 volatile u32 dummy_read;
75 wmb();
76 dummy_read = __raw_readl(cic_mem);
77 dummy_read++;
80 static void unmask_cic_irq(struct irq_data *d)
82 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
83 int vpe;
84 #ifdef CONFIG_SMP
85 unsigned int mtflags;
86 unsigned long flags;
89 * Make sure we have IRQ affinity. It may have changed while
90 * we were processing the IRQ.
92 if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
93 return;
94 #endif
96 vpe = get_current_vpe();
97 LOCK_VPE(flags, mtflags);
98 cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
99 UNLOCK_VPE(flags, mtflags);
100 cic_wmb();
103 static void mask_cic_irq(struct irq_data *d)
105 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
106 int vpe = get_current_vpe();
107 #ifdef CONFIG_SMP
108 unsigned long flags, mtflags;
109 #endif
110 LOCK_VPE(flags, mtflags);
111 cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
112 UNLOCK_VPE(flags, mtflags);
113 cic_wmb();
115 static void msp_cic_irq_ack(struct irq_data *d)
117 mask_cic_irq(d);
119 * Only really necessary for 18, 16-14 and sometimes 3:0
120 * (since these can be edge sensitive) but it doesn't
121 * hurt for the others
123 *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
124 smtc_im_ack_irq(d->irq);
127 /*Note: Limiting to VSMP . Not tested in SMTC */
129 #ifdef CONFIG_MIPS_MT_SMP
130 static int msp_cic_irq_set_affinity(struct irq_data *d,
131 const struct cpumask *cpumask, bool force)
133 int cpu;
134 unsigned long flags;
135 unsigned int mtflags;
136 unsigned long imask = (1 << (irq - MSP_CIC_INTBASE));
137 volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;
139 /* timer balancing should be disabled in kernel code */
140 BUG_ON(irq == MSP_INT_VPE0_TIMER || irq == MSP_INT_VPE1_TIMER);
142 LOCK_CORE(flags, mtflags);
143 /* enable if any of each VPE's TCs require this IRQ */
144 for_each_online_cpu(cpu) {
145 if (cpumask_test_cpu(cpu, cpumask))
146 cic_mask[cpu] |= imask;
147 else
148 cic_mask[cpu] &= ~imask;
152 UNLOCK_CORE(flags, mtflags);
153 return 0;
156 #endif
158 static struct irq_chip msp_cic_irq_controller = {
159 .name = "MSP_CIC",
160 .irq_mask = mask_cic_irq,
161 .irq_mask_ack = msp_cic_irq_ack,
162 .irq_unmask = unmask_cic_irq,
163 .irq_ack = msp_cic_irq_ack,
164 #ifdef CONFIG_MIPS_MT_SMP
165 .irq_set_affinity = msp_cic_irq_set_affinity,
166 #endif
169 void __init msp_cic_irq_init(void)
171 int i;
172 /* Mask/clear interrupts. */
173 *CIC_VPE0_MSK_REG = 0x00000000;
174 *CIC_VPE1_MSK_REG = 0x00000000;
175 *CIC_STS_REG = 0xFFFFFFFF;
177 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
178 * These inputs map to EXT_INT_POL[6:4] inside the CIC.
179 * They are to be active low, level sensitive.
181 *CIC_EXT_CFG_REG &= 0xFFFF8F8F;
183 /* initialize all the IRQ descriptors */
184 for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
185 irq_set_chip_and_handler(i, &msp_cic_irq_controller,
186 handle_level_irq);
187 #ifdef CONFIG_MIPS_MT_SMTC
188 /* Mask of CIC interrupt */
189 irq_hwmask[i] = C_IRQ4;
190 #endif
193 /* Initialize the PER interrupt sub-system */
194 msp_per_irq_init();
197 /* CIC masked by CIC vector processing before dispatch called */
198 void msp_cic_irq_dispatch(void)
200 volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
201 u32 cic_mask;
202 u32 pending;
203 int cic_status = *CIC_STS_REG;
204 cic_mask = cic_msk_reg[get_current_vpe()];
205 pending = cic_status & cic_mask;
206 if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
207 do_IRQ(MSP_INT_VPE0_TIMER);
208 } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
209 do_IRQ(MSP_INT_VPE1_TIMER);
210 } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
211 msp_per_irq_dispatch();
212 } else if (pending) {
213 do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
214 } else{
215 spurious_interrupt();