Avoid beyond bounds copy while caching ACL
[zen-stable.git] / arch / mips / alchemy / devboards / bcsr.c
blob1e83ce2e1147fb4e71bc893630849ee147530198
1 /*
2 * bcsr.c -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
4 * All Alchemy development boards (except, of course, the weird PB1000)
5 * have a few registers in a CPLD with standardised layout; they mostly
6 * only differ in base address.
7 * All registers are 16bits wide with 32bit spacing.
8 */
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/spinlock.h>
13 #include <linux/irq.h>
14 #include <asm/addrspace.h>
15 #include <asm/io.h>
16 #include <asm/mach-db1x00/bcsr.h>
/*
 * Per-register bookkeeping: the KSEG1 (uncached) address of each BCSR
 * register, plus a spinlock serializing accesses (needed for the
 * read-modify-write in bcsr_mod()).  Indexed by enum bcsr_id.
 */
static struct bcsr_reg {
	void __iomem *raddr;
	spinlock_t lock;
} bcsr_regs[BCSR_CNT];

static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
static int bcsr_csc_base;	/* linux-irq of first cascaded irq */
26 void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
28 int i;
30 bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys));
31 bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys));
33 bcsr_virt = (void __iomem *)bcsr1_phys;
35 for (i = 0; i < BCSR_CNT; i++) {
36 if (i >= BCSR_HEXLEDS)
37 bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys +
38 (0x04 * (i - BCSR_HEXLEDS));
39 else
40 bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys +
41 (0x04 * i);
43 spin_lock_init(&bcsr_regs[i].lock);
47 unsigned short bcsr_read(enum bcsr_id reg)
49 unsigned short r;
50 unsigned long flags;
52 spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
53 r = __raw_readw(bcsr_regs[reg].raddr);
54 spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
55 return r;
57 EXPORT_SYMBOL_GPL(bcsr_read);
59 void bcsr_write(enum bcsr_id reg, unsigned short val)
61 unsigned long flags;
63 spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
64 __raw_writew(val, bcsr_regs[reg].raddr);
65 wmb();
66 spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
68 EXPORT_SYMBOL_GPL(bcsr_write);
70 void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set)
72 unsigned short r;
73 unsigned long flags;
75 spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
76 r = __raw_readw(bcsr_regs[reg].raddr);
77 r &= ~clr;
78 r |= set;
79 __raw_writew(r, bcsr_regs[reg].raddr);
80 wmb();
81 spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
83 EXPORT_SYMBOL_GPL(bcsr_mod);
/* DB1200/PB1200 CPLD IRQ muxer */
88 static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
90 unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
92 disable_irq_nosync(irq);
94 for ( ; bisr; bisr &= bisr - 1)
95 generic_handle_irq(bcsr_csc_base + __ffs(bisr));
97 enable_irq(irq);
100 static void bcsr_irq_mask(struct irq_data *d)
102 unsigned short v = 1 << (d->irq - bcsr_csc_base);
103 __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
104 wmb();
107 static void bcsr_irq_maskack(struct irq_data *d)
109 unsigned short v = 1 << (d->irq - bcsr_csc_base);
110 __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
111 __raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT); /* ack */
112 wmb();
115 static void bcsr_irq_unmask(struct irq_data *d)
117 unsigned short v = 1 << (d->irq - bcsr_csc_base);
118 __raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
119 wmb();
/* irq_chip driving the CPLD-cascaded interrupt lines via the BCSR
 * mask/status registers above. */
static struct irq_chip bcsr_irq_type = {
	.name		= "CPLD",
	.irq_mask	= bcsr_irq_mask,
	.irq_mask_ack	= bcsr_irq_maskack,
	.irq_unmask	= bcsr_irq_unmask,
};
129 void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
131 unsigned int irq;
133 /* mask & enable & ack all */
134 __raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
135 __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSET);
136 __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
137 wmb();
139 bcsr_csc_base = csc_start;
141 for (irq = csc_start; irq <= csc_end; irq++)
142 irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
143 handle_level_irq, "level");
145 irq_set_chained_handler(hook_irq, bcsr_csc_handler);