/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include "internals.h"
static unsigned long ack_handle[INTC_NR_IRQS];
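/*
 * Map a vector enum to the enum of the group it belongs to, if any.
 * Returns the group's enum ID on a match, 0 otherwise, so callers can
 * retry a handle lookup at group granularity.
 */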
static intc_enum __init intc_grp_id(struct intc_desc *desc,
				    intc_enum enum_id)
{
	struct intc_group *g = desc->hw.groups;
	unsigned int i, j;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
				continue;

			return g->enum_id;
		}
	}

	return 0;
}
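/*
 * Scan the mask register tables for enum_id, resuming at *reg_idx/*fld_idx,
 * and pack the result into a handle via _INTC_MK(): register access width,
 * mode (dual set/clear registers vs. read-modify-write on a single
 * register), enable/disable register indices and the bit position.
 */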
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					mode = MODE_MASK_REG;
					reg_e = mr->reg;
					reg_d = mr->reg;
				}
			}

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
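/*
 * Resolve the mask handle for a single vector. If nothing matches and
 * do_grps is set, retry with the enum of the group the vector belongs to.
 */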
unsigned int __init
intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d,
		     intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}
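/*
 * Same scan as _intc_mask_data(), but over the priority register tables;
 * the resulting handle additionally carries the priority field width and
 * the bit offset of the field within the register.
 */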
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
				if (!pr->set_reg)
					BUG();
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
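/*
 * Group-aware wrapper around _intc_prio_data(), mirroring
 * intc_get_mask_handle().
 */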
unsigned int __init
intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d,
		     intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}
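/*
 * Build the ack handle for enum_id from the ack register tables. Ack
 * registers are always accessed read-modify-write (MODE_ENABLE_REG).
 */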
static unsigned int intc_ack_data(struct intc_desc *desc,
				  struct intc_desc_int *d, intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->reg;
			reg_d = mr->reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	return 0;
}
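/*
 * Apply a previously built handle: for each per-CPU copy of the register,
 * pick the mode-specific enable (priority-less) or disable helper and call
 * it with the width-specific register access function encoded in the
 * handle.
 */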
static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	unsigned long (*fn)(unsigned long, unsigned long,
		   unsigned long (*)(unsigned long, unsigned long,
				     unsigned long), unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}
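/*
 * Enable or disable every mask bit and priority field associated with
 * enum_id. The i/j cursors let the _intc_*_data() helpers resume their
 * scan, so an enum that spans several registers gets all of its fields
 * updated.
 */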
void __init intc_enable_disable_enum(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);

		j++;
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);

		j++;
	} while (data);
}
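/*
 * Build a handle for the sense (trigger type) field of enum_id so the
 * set_type path can reprogram the edge/level configuration.
 */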
unsigned int __init
intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d,
		      intc_enum enum_id)
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}

	return 0;
}
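/*
 * Cache the ack handle for an IRQ in ack_handle[] under intc_big_lock so
 * the ack path can later fetch it via intc_get_ack_handle() without
 * rescanning the descriptor tables.
 */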
void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
			 struct intc_desc_int *d, intc_enum id)
{
	unsigned long flags;

	/*
	 * Nothing to do for this IRQ.
	 */
	if (!desc->hw.ack_regs)
		return;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	ack_handle[irq] = intc_ack_data(desc, d, id);
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
unsigned long intc_get_ack_handle(unsigned int irq)
{
	return ack_handle[irq];
}