arch/ppc/8xx_io/commproc.c
/*
 * General Purpose functions for the global management of the
 * Communication Processor Module.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * In addition to the individual control of the communication
 * channels, there are a few functions that globally affect the
 * communication processor.
 *
 * Buffer descriptors must be allocated from the dual ported memory
 * space.  The allocator for that is here.  When the communication
 * process is reset, we reclaim the memory available.  There is
 * currently no deallocator for this memory.
 * The amount of space available is platform dependent.  On the
 * MBX, the EPPC software loads additional microcode into the
 * communication processor, and uses some of the DP ram for this
 * purpose.  Currently, the first 512 bytes and the last 256 bytes of
 * memory are used.  Right now I am conservative and only use the
 * memory that can never be used for microcode.  If there are
 * applications that require more DP ram, we can expand the boundaries
 * but then we have to be careful of any downloaded microcode.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <asm/mpc8xx.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/commproc.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/rheap.h>
#define immr_map(member)						\
({									\
	u32 offset = offsetof(immap_t, member);				\
	void *addr = ioremap (IMAP_ADDR + offset,			\
			      sizeof( ((immap_t*)0)->member));		\
	addr;								\
})

#define immr_map_size(member, size)					\
({									\
	u32 offset = offsetof(immap_t, member);				\
	void *addr = ioremap (IMAP_ADDR + offset, size);		\
	addr;								\
})
static	void	m8xx_cpm_dpinit(void);
static	uint	host_buffer;	/* One page of host buffer */
static	uint	host_end;	/* end + 1 */
cpm8xx_t	*cpmp;		/* Pointer to comm processor space */
/* CPM interrupt vector functions.
 */
struct	cpm_action {
	void	(*handler)(void *);
	void	*dev_id;
};
static	struct	cpm_action cpm_vecs[CPMVEC_NR];

static	irqreturn_t cpm_interrupt(int irq, void * dev);
static	irqreturn_t cpm_error_interrupt(int irq, void *dev);
static	void	alloc_host_memory(void);

/* Define a table of names to identify CPM interrupt handlers in
 * /proc/interrupts.
 */
const char *cpm_int_name[] =
	{ "error",	"PC4",		"PC5",		"SMC2",
	  "SMC1",	"SPI",		"PC6",		"Timer 4",
	  "",		"PC7",		"PC8",		"PC9",
	  "Timer 3",	"",		"PC10",		"PC11",
	  "I2C",	"RISC Timer",	"Timer 2",	"",
	  "IDMA2",	"IDMA1",	"SDMA error",	"PC12",
	  "PC13",	"Timer 1",	"PC14",		"SCC4",
	  "SCC3",	"SCC2",		"SCC1",		"PC15"
	};
static void
cpm_mask_irq(unsigned int irq)
{
	int cpm_vec = irq - CPM_IRQ_OFFSET;

	clrbits32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, (1 << cpm_vec));
}

static void
cpm_unmask_irq(unsigned int irq)
{
	int cpm_vec = irq - CPM_IRQ_OFFSET;

	setbits32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, (1 << cpm_vec));
}

static void
cpm_ack(unsigned int irq)
{
	/* We do not need to do anything here. */
}

static void
cpm_eoi(unsigned int irq)
{
	int cpm_vec = irq - CPM_IRQ_OFFSET;

	out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cisr, (1 << cpm_vec));
}

struct hw_interrupt_type cpm_pic = {
	.typename	= " CPM      ",
	.enable		= cpm_unmask_irq,
	.disable	= cpm_mask_irq,
	.ack		= cpm_ack,
	.end		= cpm_eoi,
};
void
m8xx_cpm_reset(void)
{
	volatile immap_t	*imp;
	volatile cpm8xx_t	*commproc;

	imp = (immap_t *)IMAP_ADDR;
	commproc = (cpm8xx_t *)&imp->im_cpm;

#ifdef CONFIG_UCODE_PATCH
	/* Perform a reset.
	 */
	commproc->cp_cpcr = (CPM_CR_RST | CPM_CR_FLG);

	/* Wait for it.
	 */
	while (commproc->cp_cpcr & CPM_CR_FLG);

	cpm_load_patch(imp);
#endif

	/* Set SDMA Bus Request priority 5.
	 * On 860T, this also enables FEC priority 6.  I am not sure
	 * this is what we really want for some applications, but the
	 * manual recommends it.
	 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
	 */
	out_be32(&imp->im_siu_conf.sc_sdcr, 1);

	/* Reclaim the DP memory for our use. */
	m8xx_cpm_dpinit();

	/* Tell everyone where the comm processor resides.
	 */
	cpmp = (cpm8xx_t *)commproc;
}
/* We used to do this earlier, but have to postpone as long as possible
 * to ensure the kernel VM is now running.
 */
static void
alloc_host_memory(void)
{
	dma_addr_t	physaddr;

	/* Set the host page for allocation.
	 */
	host_buffer = (uint)dma_alloc_coherent(NULL, PAGE_SIZE, &physaddr,
					       GFP_KERNEL);
	host_end = host_buffer + PAGE_SIZE;
}
/* This is called during init_IRQ.  We used to do it above, but this
 * was too early since init_IRQ was not yet called.
 */
static struct irqaction cpm_error_irqaction = {
	.handler = cpm_error_interrupt,
	.mask = CPU_MASK_NONE,
};
static struct irqaction cpm_interrupt_irqaction = {
	.handler = cpm_interrupt,
	.mask = CPU_MASK_NONE,
	.name = "CPM cascade",
};
void
cpm_interrupt_init(void)
{
	int i;

	/* Initialize the CPM interrupt controller.
	 */
	out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr,
	    (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
		((CPM_INTERRUPT/2) << 13) | CICR_HP_MASK);
	out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, 0);

	/* install the CPM interrupt controller routines for the CPM
	 * interrupt vectors
	 */
	for ( i = CPM_IRQ_OFFSET ; i < CPM_IRQ_OFFSET + NR_CPM_INTS ; i++ )
		irq_desc[i].chip = &cpm_pic;

	/* Set our interrupt handler with the core CPU. */
	if (setup_irq(CPM_INTERRUPT, &cpm_interrupt_irqaction))
		panic("Could not allocate CPM IRQ!");

	/* Install our own error handler. */
	cpm_error_irqaction.name = cpm_int_name[CPMVEC_ERROR];
	if (setup_irq(CPM_IRQ_OFFSET + CPMVEC_ERROR, &cpm_error_irqaction))
		panic("Could not allocate CPM error IRQ!");

	setbits32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr, CICR_IEN);
}
/*
 * Get the CPM interrupt vector.
 */
int
cpm_get_irq(void)
{
	int cpm_vec;

	/* Get the vector by setting the ACK bit and then reading
	 * the register.
	 */
	out_be16(&((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr, 1);
	cpm_vec = in_be16(&((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr);
	cpm_vec >>= 11;

	return cpm_vec;
}
/* CPM interrupt controller cascade interrupt.
 */
static irqreturn_t
cpm_interrupt(int irq, void * dev)
{
	/* This interrupt handler never actually gets called.  It is
	 * installed only to unmask the CPM cascade interrupt in the SIU
	 * and to make the CPM cascade interrupt visible in /proc/interrupts.
	 */
	return IRQ_HANDLED;
}

/* The CPM can generate the error interrupt when there is a race condition
 * between generating and masking interrupts.  All we have to do is ACK it
 * and return.  This is a no-op function so we don't need any special
 * tests in the interrupt handler.
 */
static irqreturn_t
cpm_error_interrupt(int irq, void *dev)
{
	return IRQ_HANDLED;
}
/* A helper function to translate the handler prototype required by
 * request_irq() to the handler prototype required by cpm_install_handler().
 */
static irqreturn_t
cpm_handler_helper(int irq, void *dev_id)
{
	int cpm_vec = irq - CPM_IRQ_OFFSET;

	(*cpm_vecs[cpm_vec].handler)(dev_id);

	return IRQ_HANDLED;
}
/* Install a CPM interrupt handler.
 * This routine accepts a CPM interrupt vector in the range 0 to 31.
 * This routine is retained for backward compatibility.  Rather than using
 * this routine to install a CPM interrupt handler, you can now use
 * request_irq() with an IRQ in the range CPM_IRQ_OFFSET to
 * CPM_IRQ_OFFSET + NR_CPM_INTS - 1 (16 to 47).
 *
 * Notice that the prototype of the interrupt handler function must be
 * different depending on whether you install the handler with
 * request_irq() or cpm_install_handler().
 */
void
cpm_install_handler(int cpm_vec, void (*handler)(void *), void *dev_id)
{
	int err;

	/* If null handler, assume we are trying to free the IRQ.
	 */
	if (!handler) {
		free_irq(CPM_IRQ_OFFSET + cpm_vec, dev_id);
		return;
	}

	if (cpm_vecs[cpm_vec].handler != 0)
		printk(KERN_INFO "CPM interrupt %x replacing %x\n",
			(uint)handler, (uint)cpm_vecs[cpm_vec].handler);
	cpm_vecs[cpm_vec].handler = handler;
	cpm_vecs[cpm_vec].dev_id = dev_id;

	if ((err = request_irq(CPM_IRQ_OFFSET + cpm_vec, cpm_handler_helper,
					0, cpm_int_name[cpm_vec], dev_id)))
		printk(KERN_ERR "request_irq() returned %d for CPM vector %d\n",
			err, cpm_vec);
}
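
/* A minimal usage sketch of the request_irq() path described above, with a
 * hypothetical driver handler and SCC1 as the example source; names not
 * defined in this file (my_scc1_interrupt, my_dev) are illustrative only:
 *
 *	static irqreturn_t my_scc1_interrupt(int irq, void *dev_id)
 *	{
 *		// Handle the event, then tell the core the IRQ was serviced.
 *		return IRQ_HANDLED;
 *	}
 *
 *	// IRQ number within the CPM range: CPM_IRQ_OFFSET + CPMVEC_SCC1.
 *	if (request_irq(CPM_IRQ_OFFSET + CPMVEC_SCC1, my_scc1_interrupt,
 *			0, "my scc1", &my_dev))
 *		printk(KERN_ERR "could not claim SCC1 interrupt\n");
 *
 * The equivalent cpm_install_handler() call would instead take a
 * void (*)(void *) handler and the bare vector number (CPMVEC_SCC1).
 */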
/* Free a CPM interrupt handler.
 * This routine accepts a CPM interrupt vector in the range 0 to 31.
 * This routine is retained for backward compatibility.
 */
void
cpm_free_handler(int cpm_vec)
{
	request_irq(CPM_IRQ_OFFSET + cpm_vec, NULL, 0, 0,
		cpm_vecs[cpm_vec].dev_id);

	cpm_vecs[cpm_vec].handler = NULL;
	cpm_vecs[cpm_vec].dev_id = NULL;
}
/* We also own one page of host buffer space for the allocation of
 * UART "fifos" and the like.
 */
uint
m8xx_cpm_hostalloc(uint size)
{
	uint	retloc;

	if (host_buffer == 0)
		alloc_host_memory();

	if ((host_buffer + size) >= host_end)
		return(0);

	retloc = host_buffer;
	host_buffer += size;

	return(retloc);
}
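
/* A small usage sketch of the host buffer allocator above; the 64-byte
 * "fifo" size is purely illustrative:
 *
 *	uint fifo;
 *
 *	fifo = m8xx_cpm_hostalloc(64);
 *	if (fifo == 0)
 *		printk(KERN_ERR "out of CPM host buffer space\n");
 *
 * Note there is no matching free routine; allocations come out of the single
 * page obtained in alloc_host_memory() and are never returned.
 */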
/* Set a baud rate generator.  This needs lots of work.  There are
 * four BRGs, any of which can be wired to any channel.
 * The internal baud rate clock is the system clock divided by 16.
 * This assumes the baudrate is 16x oversampled by the uart.
 */
#define BRG_INT_CLK		(((bd_t *)__res)->bi_intfreq)
#define BRG_UART_CLK		(BRG_INT_CLK/16)
#define BRG_UART_CLK_DIV16	(BRG_UART_CLK/16)

void
cpm_setbrg(uint brg, uint rate)
{
	volatile uint	*bp;

	/* This is good enough to get SMCs running.....
	 */
	bp = (uint *)&cpmp->cp_brgc1;
	bp += brg;
	/* The BRG has a 12-bit counter.  For really slow baud rates (or
	 * really fast processors), we may have to further divide by 16.
	 */
	if (((BRG_UART_CLK / rate) - 1) < 4096)
		*bp = (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN;
	else
		*bp = (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
						CPM_BRG_EN | CPM_BRG_DIV16;
}
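
/* Worked example of the divider arithmetic above, assuming a hypothetical
 * bi_intfreq of 48 MHz (so BRG_UART_CLK = 48000000 / 16 = 3000000):
 *
 *	9600 baud:  3000000 / 9600 - 1 = 311  -> fits the 12-bit counter,
 *	            so the register is set to (311 << 1) | CPM_BRG_EN.
 *	50 baud:    3000000 / 50 - 1 = 59999  -> too large, so the /16
 *	            prescaler is used: 187500 / 50 - 1 = 3749, and the
 *	            register gets (3749 << 1) | CPM_BRG_EN | CPM_BRG_DIV16.
 */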
/*
 * dpalloc / dpfree bits.
 */
static spinlock_t cpm_dpmem_lock;
/*
 * 16 blocks should be enough to satisfy all requests
 * until the memory subsystem goes up...
 */
static rh_block_t cpm_boot_dpmem_rh_block[16];
static rh_info_t cpm_dpmem_info;

#define CPM_DPMEM_ALIGNMENT	8
static u8* dpram_vbase;
static uint dpram_pbase;

void m8xx_cpm_dpinit(void)
{
	spin_lock_init(&cpm_dpmem_lock);

	dpram_vbase = immr_map_size(im_cpm.cp_dpmem, CPM_DATAONLY_BASE + CPM_DATAONLY_SIZE);
	dpram_pbase = (uint)&((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem;

	/* Initialize the info header */
	rh_init(&cpm_dpmem_info, CPM_DPMEM_ALIGNMENT,
			sizeof(cpm_boot_dpmem_rh_block) /
			sizeof(cpm_boot_dpmem_rh_block[0]),
			cpm_boot_dpmem_rh_block);

	/*
	 * Attach the usable dpmem area.
	 * XXX: This is actually crap.  CPM_DATAONLY_BASE and
	 * CPM_DATAONLY_SIZE are a subset of the available dpram.  It varies
	 * with the processor and the microcode patches applied / activated.
	 * But the following should be at least safe.
	 */
	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
}
/*
 * Allocate the requested size worth of DP memory.
 * This function returns an offset into the DPRAM area.
 * Use cpm_dpram_addr() to get the virtual address of the area.
 */
unsigned long cpm_dpalloc(uint size, uint align)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return start;
}
EXPORT_SYMBOL(cpm_dpalloc);
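
/* A minimal usage sketch for DPRAM allocation; the 32-byte buffer descriptor
 * area and the bd pointer are illustrative, not part of this file:
 *
 *	unsigned long dp_off;
 *	cbd_t *bd;
 *
 *	dp_off = cpm_dpalloc(32, 8);		// 32 bytes, 8-byte aligned
 *	bd = (cbd_t *)cpm_dpram_addr(dp_off);	// offset -> virtual address
 *	...
 *	cpm_dpfree(dp_off);			// return it when done
 */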
int cpm_dpfree(unsigned long offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	ret = rh_free(&cpm_dpmem_info, offset);
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return ret;
}
EXPORT_SYMBOL(cpm_dpfree);
unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return start;
}
EXPORT_SYMBOL(cpm_dpalloc_fixed);
void cpm_dpdump(void)
{
	rh_dump(&cpm_dpmem_info);
}
EXPORT_SYMBOL(cpm_dpdump);

void *cpm_dpram_addr(unsigned long offset)
{
	return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
}
EXPORT_SYMBOL(cpm_dpram_addr);

uint cpm_dpram_phys(u8* addr)
{
	return (dpram_pbase + (uint)(addr - dpram_vbase));
}
EXPORT_SYMBOL(cpm_dpram_phys);