drivers/sh/intc.c
/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999  Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000  Kazumoto Kojima
 * Copyright (C) 2001  David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003  Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>

#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
        ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
         ((addr_e) << 16) | ((addr_d << 24)))

#define _INTC_SHIFT(h) (h & 0x1f)
#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
#define _INTC_FN(h) ((h >> 9) & 0xf)
#define _INTC_MODE(h) ((h >> 13) & 0x7)
#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)

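/*
 * Note (derived from the accessor macros above): _INTC_MK() packs a handle
 * word as
 *
 *   [31:24] addr_d - index into d->reg[] for the disable register
 *   [23:16] addr_e - index into d->reg[] for the enable register
 *   [15:13] mode   - one of the MODE_* values defined below
 *   [12: 9] fn     - index into intc_reg_fns[]
 *   [ 8: 5] width  - field width in bits
 *   [ 4: 0] shift  - bit position of the field within the register
 */
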
struct intc_handle_int {
        unsigned int irq;
        unsigned long handle;
};

struct intc_desc_int {
        struct list_head list;
        struct sys_device sysdev;
        pm_message_t state;
        unsigned long *reg;
#ifdef CONFIG_SMP
        unsigned long *smp;
#endif
        unsigned int nr_reg;
        struct intc_handle_int *prio;
        unsigned int nr_prio;
        struct intc_handle_int *sense;
        unsigned int nr_sense;
        struct irq_chip chip;
};

static LIST_HEAD(intc_list);

#ifdef CONFIG_SMP
#define IS_SMP(x) x.smp
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x) 0
#define INTC_REG(d, x, c) (d->reg[(x)])
#define SMP_NR(d, x) 1
#endif

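/*
 * Note (derived from the macros above): on SMP, the low 8 bits of d->smp[x]
 * hold the per-CPU address stride added to d->reg[x] for CPU c, and the
 * remaining bits hold the number of per-CPU register copies (0 meaning a
 * single, shared register).
 */
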
static unsigned int intc_prio_level[NR_IRQS];   /* for now */
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static unsigned long ack_handle[NR_IRQS];
#endif

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
        struct irq_chip *chip = get_irq_chip(irq);
        return (void *)((char *)chip - offsetof(struct intc_desc_int, chip));
}

static inline unsigned int set_field(unsigned int value,
                                     unsigned int field_value,
                                     unsigned int handle)
{
        unsigned int width = _INTC_WIDTH(handle);
        unsigned int shift = _INTC_SHIFT(handle);

        value &= ~(((1 << width) - 1) << shift);
        value |= field_value << shift;
        return value;
}

static void write_8(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writeb(set_field(0, data, h), addr);
}

static void write_16(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writew(set_field(0, data, h), addr);
}

static void write_32(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writel(set_field(0, data, h), addr);
}

static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;
        local_irq_save(flags);
        __raw_writeb(set_field(__raw_readb(addr), data, h), addr);
        local_irq_restore(flags);
}

static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;
        local_irq_save(flags);
        __raw_writew(set_field(__raw_readw(addr), data, h), addr);
        local_irq_restore(flags);
}

static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;
        local_irq_save(flags);
        __raw_writel(set_field(__raw_readl(addr), data, h), addr);
        local_irq_restore(flags);
}

enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };

static void (*intc_reg_fns[])(unsigned long addr,
                              unsigned long h,
                              unsigned long data) = {
        [REG_FN_WRITE_BASE + 0] = write_8,
        [REG_FN_WRITE_BASE + 1] = write_16,
        [REG_FN_WRITE_BASE + 3] = write_32,
        [REG_FN_MODIFY_BASE + 0] = modify_8,
        [REG_FN_MODIFY_BASE + 1] = modify_16,
        [REG_FN_MODIFY_BASE + 3] = modify_32,
};

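/*
 * Note (derived from the callers below): the table is indexed with
 * base + (reg_width >> 3) - 1, i.e. +0 for 8-bit, +1 for 16-bit and +3 for
 * 32-bit registers, so the unused +2 slots simply stay NULL.
 */
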
enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
       MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
       MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
       MODE_PRIO_REG,       /* Priority value written to enable interrupt */
       MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static void intc_mode_field(unsigned long addr,
                            unsigned long handle,
                            void (*fn)(unsigned long,
                                       unsigned long,
                                       unsigned long),
                            unsigned int irq)
{
        fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static void intc_mode_zero(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, 0);
}

static void intc_mode_prio(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, intc_prio_level[irq]);
}

static void (*intc_enable_fns[])(unsigned long addr,
                                 unsigned long handle,
                                 void (*fn)(unsigned long,
                                            unsigned long,
                                            unsigned long),
                                 unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_field,
        [MODE_MASK_REG] = intc_mode_zero,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_prio,
        [MODE_PCLR_REG] = intc_mode_prio,
};

static void (*intc_disable_fns[])(unsigned long addr,
                                  unsigned long handle,
                                  void (*fn)(unsigned long,
                                             unsigned long,
                                             unsigned long),
                                  unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_zero,
        [MODE_MASK_REG] = intc_mode_field,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_zero,
        [MODE_PCLR_REG] = intc_mode_field,
};

static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
                addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
                intc_enable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }
}

static void intc_enable(unsigned int irq)
{
        _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = (unsigned long)get_irq_chip_data(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
                intc_disable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }
}

static int intc_set_wake(unsigned int irq, unsigned int on)
{
        return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static void intc_mask_ack(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = ack_handle[irq];
        unsigned long addr;

        intc_disable(irq);

        /* read register and write zero only to the associated bit */
        if (handle) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
                switch (_INTC_FN(handle)) {
                case REG_FN_MODIFY_BASE + 0:    /* 8bit */
                        __raw_readb(addr);
                        __raw_writeb(0xff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 1:    /* 16bit */
                        __raw_readw(addr);
                        __raw_writew(0xffff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 3:    /* 32bit */
                        __raw_readl(addr);
                        __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
                        break;
                default:
                        BUG();
                        break;
                }
        }
}
#endif

static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
                                             unsigned int nr_hp,
                                             unsigned int irq)
{
        int i;

        /* this doesn't scale well, but...
         *
         * this function should only be used for certain uncommon
         * operations such as intc_set_priority() and intc_set_sense()
         * and in those rare cases performance doesn't matter that much.
         * keeping the memory footprint low is more important.
         *
         * one rather simple way to speed this up and still keep the
         * memory footprint down is to make sure the array is sorted
         * and then perform a bisect to lookup the irq.
         */
        for (i = 0; i < nr_hp; i++) {
                if ((hp + i)->irq != irq)
                        continue;

                return hp + i;
        }

        return NULL;
}

int intc_set_priority(unsigned int irq, unsigned int prio)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        struct intc_handle_int *ihp;

        if (!intc_prio_level[irq] || prio <= 1)
                return -EINVAL;

        ihp = intc_find_irq(d->prio, d->nr_prio, irq);
        if (ihp) {
                if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
                        return -EINVAL;

                intc_prio_level[irq] = prio;

                /*
                 * only set secondary masking method directly
                 * primary masking method is using intc_prio_level[irq]
                 * priority level will be set during next enable()
                 */
                if (_INTC_FN(ihp->handle) != REG_FN_ERR)
                        _intc_enable(irq, ihp->handle);
        }
        return 0;
}

#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
        [IRQ_TYPE_EDGE_FALLING] = VALID(0),
        [IRQ_TYPE_EDGE_RISING] = VALID(1),
        [IRQ_TYPE_LEVEL_LOW] = VALID(2),
        /* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
        [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};

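/*
 * Note (derived from intc_set_sense() below): VALID() tags supported sense
 * types with bit 7, so a zero table entry makes intc_set_sense() reject the
 * requested type with -EINVAL.
 */
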
static int intc_set_sense(unsigned int irq, unsigned int type)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
        struct intc_handle_int *ihp;
        unsigned long addr;

        if (!value)
                return -EINVAL;

        ihp = intc_find_irq(d->sense, d->nr_sense, irq);
        if (ihp) {
                addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
                intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
        }
        return 0;
}

static unsigned int __init intc_get_reg(struct intc_desc_int *d,
                                        unsigned long address)
{
        unsigned int k;

        for (k = 0; k < d->nr_reg; k++) {
                if (d->reg[k] == address)
                        return k;
        }

        BUG();
        return 0;
}

static intc_enum __init intc_grp_id(struct intc_desc *desc,
                                    intc_enum enum_id)
{
        struct intc_group *g = desc->groups;
        unsigned int i, j;

        for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
                g = desc->groups + i;

                for (j = 0; g->enum_ids[j]; j++) {
                        if (g->enum_ids[j] != enum_id)
                                continue;

                        return g->enum_id;
                }
        }

        return 0;
}

static unsigned int __init intc_mask_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        struct intc_mask_reg *mr = desc->mask_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
                mr = desc->mask_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        if (mr->set_reg && mr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_DUAL_REG;
                                reg_e = mr->clr_reg;
                                reg_d = mr->set_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                if (mr->set_reg) {
                                        mode = MODE_ENABLE_REG;
                                        reg_e = mr->set_reg;
                                        reg_d = mr->set_reg;
                                } else {
                                        mode = MODE_MASK_REG;
                                        reg_e = mr->clr_reg;
                                        reg_d = mr->clr_reg;
                                }
                        }

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        if (do_grps)
                return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

static unsigned int __init intc_prio_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        struct intc_prio_reg *pr = desc->prio_regs;
        unsigned int i, j, fn, mode, bit;
        unsigned long reg_e, reg_d;

        for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
                pr = desc->prio_regs + i;

                for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
                        if (pr->enum_ids[j] != enum_id)
                                continue;

                        if (pr->set_reg && pr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_PCLR_REG;
                                reg_e = pr->set_reg;
                                reg_d = pr->clr_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                mode = MODE_PRIO_REG;
                                if (!pr->set_reg)
                                        BUG();
                                reg_e = pr->set_reg;
                                reg_d = pr->set_reg;
                        }

                        fn += (pr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * pr->field_width > pr->reg_width);

                        bit = pr->reg_width - ((j + 1) * pr->field_width);

                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        pr->field_width, bit);
                }
        }

        if (do_grps)
                return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static unsigned int __init intc_ack_data(struct intc_desc *desc,
                                         struct intc_desc_int *d,
                                         intc_enum enum_id)
{
        struct intc_mask_reg *mr = desc->ack_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) {
                mr = desc->ack_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        mode = MODE_ENABLE_REG;
                        reg_e = mr->set_reg;
                        reg_d = mr->set_reg;

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        return 0;
}
#endif

static unsigned int __init intc_sense_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id)
{
        struct intc_sense_reg *sr = desc->sense_regs;
        unsigned int i, j, fn, bit;

        for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) {
                sr = desc->sense_regs + i;

                for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
                        if (sr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        fn += (sr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * sr->field_width > sr->reg_width);

                        bit = sr->reg_width - ((j + 1) * sr->field_width);

                        return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
                                        0, sr->field_width, bit);
                }
        }

        return 0;
}

static void __init intc_register_irq(struct intc_desc *desc,
                                     struct intc_desc_int *d,
                                     intc_enum enum_id,
                                     unsigned int irq)
{
        struct intc_handle_int *hp;
        unsigned int data[2], primary;

        /* Prefer single interrupt source bitmap over other combinations:
         * 1. bitmap, single interrupt source
         * 2. priority, single interrupt source
         * 3. bitmap, multiple interrupt sources (groups)
         * 4. priority, multiple interrupt sources (groups)
         */
        data[0] = intc_mask_data(desc, d, enum_id, 0);
        data[1] = intc_prio_data(desc, d, enum_id, 0);

        primary = 0;
        if (!data[0] && data[1])
                primary = 1;

        if (!data[0] && !data[1])
                pr_warning("intc: missing unique irq mask for "
                           "irq %d (vect 0x%04x)\n", irq, irq2evt(irq));

        data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
        data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

        if (!data[primary])
                primary ^= 1;

        BUG_ON(!data[primary]); /* must have primary masking method */

        disable_irq_nosync(irq);
        set_irq_chip_and_handler_name(irq, &d->chip,
                                      handle_level_irq, "level");
        set_irq_chip_data(irq, (void *)data[primary]);

        /* set priority level
         * - this needs to be at least 2 for 5-bit priorities on 7780
         */
        intc_prio_level[irq] = 2;

        /* enable secondary masking method if present */
        if (data[!primary])
                _intc_enable(irq, data[!primary]);

        /* add irq to d->prio list if priority is available */
        if (data[1]) {
                hp = d->prio + d->nr_prio;
                hp->irq = irq;
                hp->handle = data[1];

                if (primary) {
                        /*
                         * only secondary priority should access registers, so
                         * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
                         */
                        hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
                        hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
                }
                d->nr_prio++;
        }

        /* add irq to d->sense list if sense is available */
        data[0] = intc_sense_data(desc, d, enum_id);
        if (data[0]) {
                (d->sense + d->nr_sense)->irq = irq;
                (d->sense + d->nr_sense)->handle = data[0];
                d->nr_sense++;
        }

        /* irq should be disabled by default */
        d->chip.mask(irq);

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
        if (desc->ack_regs)
                ack_handle[irq] = intc_ack_data(desc, d, enum_id);
#endif
}

static unsigned int __init save_reg(struct intc_desc_int *d,
                                    unsigned int cnt,
                                    unsigned long value,
                                    unsigned int smp)
{
        if (value) {
                d->reg[cnt] = value;
#ifdef CONFIG_SMP
                d->smp[cnt] = smp;
#endif
                return 1;
        }

        return 0;
}

static unsigned char *intc_evt2irq_table;

unsigned int intc_evt2irq(unsigned int vector)
{
        unsigned int irq = evt2irq(vector);

        if (intc_evt2irq_table && intc_evt2irq_table[irq])
                irq = intc_evt2irq_table[irq];

        return irq;
}

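/*
 * Note (derived from register_intc_controller() below): when several vectors
 * share the same enum_id, only the first vector keeps it; the duplicates are
 * recorded in intc_evt2irq_table so intc_evt2irq() redirects their events to
 * the first vector's irq.
 */
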
void __init register_intc_controller(struct intc_desc *desc)
{
        unsigned int i, k, smp;
        struct intc_desc_int *d;

        d = kzalloc(sizeof(*d), GFP_NOWAIT);

        INIT_LIST_HEAD(&d->list);
        list_add(&d->list, &intc_list);

        d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
        d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
        d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
        d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
#endif

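        /*
         * Note (derived from the save_reg() calls below): mask and prio
         * register blocks are counted twice to reserve room for both their
         * set_reg and clr_reg addresses in d->reg[], while sense and ack
         * registers contribute one entry each.
         */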
        d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
#ifdef CONFIG_SMP
        d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
#endif
        k = 0;

        if (desc->mask_regs) {
                for (i = 0; i < desc->nr_mask_regs; i++) {
                        smp = IS_SMP(desc->mask_regs[i]);
                        k += save_reg(d, k, desc->mask_regs[i].set_reg, smp);
                        k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp);
                }
        }

        if (desc->prio_regs) {
                d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT);

                for (i = 0; i < desc->nr_prio_regs; i++) {
                        smp = IS_SMP(desc->prio_regs[i]);
                        k += save_reg(d, k, desc->prio_regs[i].set_reg, smp);
                        k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp);
                }
        }

        if (desc->sense_regs) {
                d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT);

                for (i = 0; i < desc->nr_sense_regs; i++)
                        k += save_reg(d, k, desc->sense_regs[i].reg, 0);
        }

        d->chip.name = desc->name;
        d->chip.mask = intc_disable;
        d->chip.unmask = intc_enable;
        d->chip.mask_ack = intc_disable;
        d->chip.enable = intc_enable;
        d->chip.disable = intc_disable;
        d->chip.shutdown = intc_disable;
        d->chip.set_type = intc_set_sense;
        d->chip.set_wake = intc_set_wake;

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
        if (desc->ack_regs) {
                for (i = 0; i < desc->nr_ack_regs; i++)
                        k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);

                d->chip.mask_ack = intc_mask_ack;
        }
#endif

        BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

        /* keep the first vector only if same enum is used multiple times */
        for (i = 0; i < desc->nr_vectors; i++) {
                struct intc_vect *vect = desc->vectors + i;
                int first_irq = evt2irq(vect->vect);

                if (!vect->enum_id)
                        continue;

                for (k = i + 1; k < desc->nr_vectors; k++) {
                        struct intc_vect *vect2 = desc->vectors + k;

                        if (vect->enum_id != vect2->enum_id)
                                continue;

                        vect2->enum_id = 0;

                        if (!intc_evt2irq_table)
                                intc_evt2irq_table = kzalloc(NR_IRQS, GFP_NOWAIT);

                        if (!intc_evt2irq_table) {
                                pr_warning("intc: cannot allocate evt2irq!\n");
                                continue;
                        }

                        intc_evt2irq_table[evt2irq(vect2->vect)] = first_irq;
                }
        }

        /* register the vectors one by one */
        for (i = 0; i < desc->nr_vectors; i++) {
                struct intc_vect *vect = desc->vectors + i;
                unsigned int irq = evt2irq(vect->vect);
                struct irq_desc *irq_desc;

                if (!vect->enum_id)
                        continue;

                irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
                if (unlikely(!irq_desc)) {
                        printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                        continue;
                }

                intc_register_irq(desc, d, vect->enum_id, irq);
        }
}

static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
        struct intc_desc_int *d;
        struct irq_desc *desc;
        int irq;

        /* get intc controller associated with this sysdev */
        d = container_of(dev, struct intc_desc_int, sysdev);

        switch (state.event) {
        case PM_EVENT_ON:
                if (d->state.event != PM_EVENT_FREEZE)
                        break;
                for_each_irq_desc(irq, desc) {
                        if (desc->chip != &d->chip)
                                continue;
                        if (desc->status & IRQ_DISABLED)
                                intc_disable(irq);
                        else
                                intc_enable(irq);
                }
                break;
        case PM_EVENT_FREEZE:
                /* nothing has to be done */
                break;
        case PM_EVENT_SUSPEND:
                /* enable wakeup irqs belonging to this intc controller */
                for_each_irq_desc(irq, desc) {
                        if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
                                intc_enable(irq);
                }
                break;
        }

        d->state = state;

        return 0;
}

static int intc_resume(struct sys_device *dev)
{
        return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
        .name           = "intc",
        .suspend        = intc_suspend,
        .resume         = intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
        struct intc_desc_int *d;
        int error;
        int id = 0;

        error = sysdev_class_register(&intc_sysdev_class);
        if (!error) {
                list_for_each_entry(d, &intc_list, list) {
                        d->sysdev.id = id;
                        d->sysdev.cls = &intc_sysdev_class;
                        error = sysdev_register(&d->sysdev);
                        if (error)
                                break;
                        id++;
                }
        }

        if (error)
                pr_warning("intc: sysdev registration error\n");

        return error;
}
device_initcall(register_intc_sysdevs);