/*
 * Interrupt handling for Marvell mv64360/mv64460 host bridges (Discovery)
 *
 * Author: Dale Farnsworth <dale@farnsworth.org>
 *
 * 2007 (c) MontaVista, Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/irq.h>

#include "mv64x60.h"
/* Interrupt Controller Interface Registers */
#define MV64X60_IC_MAIN_CAUSE_LO	0x0004
#define MV64X60_IC_MAIN_CAUSE_HI	0x000c
#define MV64X60_IC_CPU0_INTR_MASK_LO	0x0014
#define MV64X60_IC_CPU0_INTR_MASK_HI	0x001c
#define MV64X60_IC_CPU0_SELECT_CAUSE	0x0024

#define MV64X60_HIGH_GPP_GROUPS		0x0f000000
#define MV64X60_SELECT_CAUSE_HIGH	0x40000000

/* General Purpose Pins Controller Interface Registers */
#define MV64x60_GPP_INTR_CAUSE		0x0008
#define MV64x60_GPP_INTR_MASK		0x000c

#define MV64x60_LEVEL1_LOW		0
#define MV64x60_LEVEL1_HIGH		1
#define MV64x60_LEVEL1_GPP		2

#define MV64x60_LEVEL1_MASK		0x00000060
#define MV64x60_LEVEL1_OFFSET		5

#define MV64x60_LEVEL2_MASK		0x0000001f

#define MV64x60_NUM_IRQS		96
static DEFINE_SPINLOCK(mv64x60_lock);

static void __iomem *mv64x60_irq_reg_base;
static void __iomem *mv64x60_gpp_reg_base;
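/*
 * Both register bases are ioremapped in mv64x60_init_irq() from the
 * "marvell,mv64360-pic" and "marvell,mv64360-gpp" device tree nodes;
 * the MV64X60_IC_* and MV64x60_GPP_* offsets above are relative to the
 * respective base.
 */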
/*
 * Interrupt Controller Handling
 *
 * The interrupt controller handles three groups of interrupts:
 *   main low:  IRQ0-IRQ31
 *   main high: IRQ32-IRQ63
 *   gpp:       IRQ64-IRQ95
 *
 * This code handles interrupts in two levels.  Level 1 selects the
 * interrupt group, and level 2 selects an IRQ within that group.
 * Each group has its own irq_chip structure.
 */
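/*
 * A hardware IRQ number encodes both levels: bits 6:5 (MV64x60_LEVEL1_MASK,
 * shifted by MV64x60_LEVEL1_OFFSET) select the group, and bits 4:0
 * (MV64x60_LEVEL2_MASK) select the bit within that group's 32-bit
 * cause/mask registers.
 */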
static u32 mv64x60_cached_low_mask;
static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS;
static u32 mv64x60_cached_gpp_mask;

static struct irq_domain *mv64x60_irq_host;
/*
 * mv64x60_chip_low functions
 */
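/*
 * Mask/unmask a "main low" interrupt (hwirq 0-31) by updating the cached
 * CPU0 low mask under the lock and writing it back.  The trailing read of
 * the mask register pushes the write out to the bridge before returning.
 */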
static void mv64x60_mask_low(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_low_mask &= ~(1 << level2);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
}

static void mv64x60_unmask_low(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_low_mask |= 1 << level2;
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
}

static struct irq_chip mv64x60_chip_low = {
	.name		= "mv64x60_low",
	.irq_mask	= mv64x60_mask_low,
	.irq_mask_ack	= mv64x60_mask_low,
	.irq_unmask	= mv64x60_unmask_low,
};
/*
 * mv64x60_chip_high functions
 */
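/*
 * The "main high" group (hwirq 32-63) works the same way as the low group.
 * Its cached mask starts out as MV64X60_HIGH_GPP_GROUPS so the GPP summary
 * bits in the main high cause register are never masked; GPP interrupts
 * cascade to the CPU through those bits.
 */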
static void mv64x60_mask_high(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_high_mask &= ~(1 << level2);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
}

static void mv64x60_unmask_high(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_high_mask |= 1 << level2;
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
}

static struct irq_chip mv64x60_chip_high = {
	.name		= "mv64x60_high",
	.irq_mask	= mv64x60_mask_high,
	.irq_mask_ack	= mv64x60_mask_high,
	.irq_unmask	= mv64x60_unmask_high,
};
/*
 * mv64x60_chip_gpp functions
 */
static void mv64x60_mask_gpp(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_gpp_mask &= ~(1 << level2);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
}
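/*
 * GPP interrupts also need to be acknowledged: after masking, the pending
 * bit is cleared by writing the GPP cause register with every bit set
 * except the one being acked, so only that bit is cleared and the rest are
 * left untouched.
 */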
static void mv64x60_mask_ack_gpp(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_gpp_mask &= ~(1 << level2);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE,
		 ~(1 << level2));
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE);
}

static void mv64x60_unmask_gpp(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_gpp_mask |= 1 << level2;
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
}

static struct irq_chip mv64x60_chip_gpp = {
	.name		= "mv64x60_gpp",
	.irq_mask	= mv64x60_mask_gpp,
	.irq_mask_ack	= mv64x60_mask_ack_gpp,
	.irq_unmask	= mv64x60_unmask_gpp,
};
/*
 * mv64x60_host_ops functions
 */
static struct irq_chip *mv64x60_chips[] = {
	[MV64x60_LEVEL1_LOW]  = &mv64x60_chip_low,
	[MV64x60_LEVEL1_HIGH] = &mv64x60_chip_high,
	[MV64x60_LEVEL1_GPP]  = &mv64x60_chip_gpp,
};
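/*
 * irq_domain .map hook: every interrupt is level triggered, and the level 1
 * bits of the hwirq select which of the three irq_chips handles it.
 */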
static int mv64x60_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hwirq)
{
	int level1;

	irq_set_status_flags(virq, IRQ_LEVEL);

	level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
	BUG_ON(level1 > MV64x60_LEVEL1_GPP);
	irq_set_chip_and_handler(virq, mv64x60_chips[level1],
				 handle_level_irq);

	return 0;
}

static const struct irq_domain_ops mv64x60_host_ops = {
	.map = mv64x60_host_map,
};
/*
 * Global functions
 */
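/*
 * mv64x60_init_irq() maps the GPP and main interrupt controller register
 * blocks described in the device tree, registers a linear irq_domain for
 * all 96 interrupts, then programs the initial mask values (everything
 * masked except the GPP summary bits) and clears any stale cause bits.
 */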
void __init mv64x60_init_irq(void)
{
	struct device_node *np;
	phys_addr_t paddr;
	unsigned int size;
	const unsigned int *reg;
	unsigned long flags;

	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);
	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_irq_reg_base = ioremap(paddr, reg[1]);

	mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
						 &mv64x60_host_ops, NULL);

	spin_lock_irqsave(&mv64x60_lock, flags);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);

	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
}
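/*
 * mv64x60_get_irq() reads the CPU0 select cause register, works out which
 * group (main low, main high or GPP) is asserting, builds the corresponding
 * hwirq and converts it to a Linux virq with irq_linear_revmap().  It
 * returns 0 when no unmasked cause bit is pending.
 */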
unsigned int mv64x60_get_irq(void)
{
	u32 cause;
	int level1;
	irq_hw_number_t hwirq;
	int virq = 0;

	cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE);
	if (cause & MV64X60_SELECT_CAUSE_HIGH) {
		cause &= mv64x60_cached_high_mask;
		level1 = MV64x60_LEVEL1_HIGH;
		if (cause & MV64X60_HIGH_GPP_GROUPS) {
			cause = in_le32(mv64x60_gpp_reg_base +
					MV64x60_GPP_INTR_CAUSE);
			cause &= mv64x60_cached_gpp_mask;
			level1 = MV64x60_LEVEL1_GPP;
		}
	} else {
		cause &= mv64x60_cached_low_mask;
		level1 = MV64x60_LEVEL1_LOW;
	}

	if (cause) {
		hwirq = (level1 << MV64x60_LEVEL1_OFFSET) | __ilog2(cause);
		virq = irq_linear_revmap(mv64x60_irq_host, hwirq);
	}

	return virq;
}