/*
 * General Purpose functions for the global management of the
 * Communication Processor Module.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * In addition to the individual control of the communication
 * channels, there are a few functions that globally affect the
 * communication processor.
 *
 * Buffer descriptors must be allocated from the dual ported memory
 * space.  The allocator for that is here.  When the communication
 * processor is reset, we reclaim the memory available.  There is
 * currently no deallocator for this memory.
 * The amount of space available is platform dependent.  On the
 * MBX, the EPPC software loads additional microcode into the
 * communication processor, and uses some of the DP ram for this
 * purpose.  Currently, the first 512 bytes and the last 256 bytes of
 * memory are used.  Right now I am conservative and only use the
 * memory that can never be used for microcode.  If there are
 * applications that require more DP ram, we can expand the boundaries
 * but then we have to be careful of any downloaded microcode.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <asm/mpc8xx.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/commproc.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/rheap.h>
static void m8xx_cpm_dpinit(void);
static uint host_buffer;	/* One page of host buffer */
static uint host_end;		/* end + 1 */
cpm8xx_t *cpmp;			/* Pointer to comm processor space */
/* CPM interrupt vector functions.
 */
struct cpm_action {
	void	(*handler)(void *, struct pt_regs *regs);
	void	*dev_id;
};
static struct cpm_action cpm_vecs[CPMVEC_NR];
static irqreturn_t cpm_interrupt(int irq, void *dev, struct pt_regs *regs);
static irqreturn_t cpm_error_interrupt(int irq, void *dev, struct pt_regs *regs);
static void alloc_host_memory(void);
/* Define a table of names to identify CPM interrupt handlers in
 * /proc/interrupts.
 */
const char *cpm_int_name[] =
	{ "error",	"PC4",		"PC5",		"SMC2",
	  "SMC1",	"SPI",		"PC6",		"Timer 4",
	  "",		"PC7",		"PC8",		"PC9",
	  "Timer 3",	"",		"PC10",		"PC11",
	  "I2C",	"RISC Timer",	"Timer 2",	"",
	  "IDMA2",	"IDMA1",	"SDMA error",	"PC12",
	  "PC13",	"Timer 1",	"PC14",		"SCC4",
	  "SCC3",	"SCC2",		"SCC1",		"PC15"
	};
static void
cpm_mask_irq(unsigned int irq)
{
	int cpm_vec = irq - CPM_IRQ_OFFSET;

	clrbits32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, (1 << cpm_vec));
}

static void
cpm_unmask_irq(unsigned int irq)
{
	int cpm_vec = irq - CPM_IRQ_OFFSET;

	setbits32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, (1 << cpm_vec));
}

static void
cpm_ack(unsigned int irq)
{
	/* We do not need to do anything here. */
}

static void
cpm_eoi(unsigned int irq)
{
	int cpm_vec = irq - CPM_IRQ_OFFSET;

	out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cisr, (1 << cpm_vec));
}
struct hw_interrupt_type cpm_pic = {
	.enable		= cpm_unmask_irq,
	.disable	= cpm_mask_irq,
	.ack		= cpm_ack,
	.end		= cpm_eoi,
};
void
m8xx_cpm_reset(void)
{
	volatile immap_t	*imp;
	volatile cpm8xx_t	*commproc;

	imp = (immap_t *)IMAP_ADDR;
	commproc = (cpm8xx_t *)&imp->im_cpm;
#ifdef CONFIG_UCODE_PATCH
	/* Perform a reset.
	 */
	commproc->cp_cpcr = (CPM_CR_RST | CPM_CR_FLG);

	/* Wait for it.
	 */
	while (commproc->cp_cpcr & CPM_CR_FLG);

	cpm_load_patch(imp);	/* load mpc8xx microcode patch */
#endif
	/* Set SDMA Bus Request priority 5.
	 * On 860T, this also enables FEC priority 6.  I am not sure
	 * this is what we really want for some applications, but the
	 * manual recommends it.
	 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
	 */
	out_be32(&imp->im_siu_conf.sc_sdcr, 1);

	/* Reclaim the DP memory for our use. */
	m8xx_cpm_dpinit();

	/* Tell everyone where the comm processor resides.
	 */
	cpmp = (cpm8xx_t *)commproc;
}
/* We used to do this earlier, but have to postpone as long as possible
 * to ensure the kernel VM is now running.
 */
static void
alloc_host_memory(void)
{
	dma_addr_t	physaddr;

	/* Set the host page for allocation.
	 */
	host_buffer = (uint)dma_alloc_coherent(NULL, PAGE_SIZE, &physaddr,
					       GFP_KERNEL);
	host_end = host_buffer + PAGE_SIZE;
}
/* This is called during init_IRQ.  We used to do it above, but this
 * was too early since init_IRQ was not yet called.
 */
static struct irqaction cpm_error_irqaction = {
	.handler = cpm_error_interrupt,
	.mask = CPU_MASK_NONE,
};
static struct irqaction cpm_interrupt_irqaction = {
	.handler = cpm_interrupt,
	.mask = CPU_MASK_NONE,
	.name = "CPM cascade",
};
void
cpm_interrupt_init(void)
{
	int i;

	/* Initialize the CPM interrupt controller.
	 */
	out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr,
	    (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
		((CPM_INTERRUPT/2) << 13) | CICR_HP_MASK);
	out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, 0);

	/* Install the CPM interrupt controller routines for the CPM
	 * interrupt vectors.
	 */
	for (i = CPM_IRQ_OFFSET; i < CPM_IRQ_OFFSET + NR_CPM_INTS; i++)
		irq_desc[i].chip = &cpm_pic;

	/* Set our interrupt handler with the core CPU. */
	if (setup_irq(CPM_INTERRUPT, &cpm_interrupt_irqaction))
		panic("Could not allocate CPM IRQ!");

	/* Install our own error handler. */
	cpm_error_irqaction.name = cpm_int_name[CPMVEC_ERROR];
	if (setup_irq(CPM_IRQ_OFFSET + CPMVEC_ERROR, &cpm_error_irqaction))
		panic("Could not allocate CPM error IRQ!");

	setbits32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr, CICR_IEN);
}
/*
 * Get the CPM interrupt vector.
 */
int
cpm_get_irq(struct pt_regs *regs)
{
	int cpm_vec;

	/* Get the vector by setting the ACK bit and then reading
	 * the register.
	 */
	out_be16(&((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr, 1);
	cpm_vec = in_be16(&((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr);
	cpm_vec >>= 11;		/* The vector number is in the upper five bits of the CIVR. */

	return cpm_vec;
}
/* CPM interrupt controller cascade interrupt.
 */
static irqreturn_t
cpm_interrupt(int irq, void *dev, struct pt_regs *regs)
{
	/* This interrupt handler never actually gets called.  It is
	 * installed only to unmask the CPM cascade interrupt in the SIU
	 * and to make the CPM cascade interrupt visible in /proc/interrupts.
	 */
	return IRQ_HANDLED;
}

/* The CPM can generate the error interrupt when there is a race condition
 * between generating and masking interrupts.  All we have to do is ACK it
 * and return.  This is a no-op function so we don't need any special
 * tests in the interrupt handler.
 */
static irqreturn_t
cpm_error_interrupt(int irq, void *dev, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}
/* A helper function to translate the handler prototype required by
 * request_irq() to the handler prototype required by cpm_install_handler().
 */
static irqreturn_t
cpm_handler_helper(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpm_vec = irq - CPM_IRQ_OFFSET;

	(*cpm_vecs[cpm_vec].handler)(dev_id, regs);

	return IRQ_HANDLED;
}
/* Install a CPM interrupt handler.
 * This routine accepts a CPM interrupt vector in the range 0 to 31.
 * This routine is retained for backward compatibility.  Rather than using
 * this routine to install a CPM interrupt handler, you can now use
 * request_irq() with an IRQ in the range CPM_IRQ_OFFSET to
 * CPM_IRQ_OFFSET + NR_CPM_INTS - 1 (16 to 47).
 *
 * Notice that the prototype of the interrupt handler function must be
 * different depending on whether you install the handler with
 * request_irq() or cpm_install_handler().
 */
void
cpm_install_handler(int cpm_vec, void (*handler)(void *, struct pt_regs *regs),
		    void *dev_id)
{
	int err;

	/* If null handler, assume we are trying to free the IRQ.
	 */
	if (!handler) {
		free_irq(CPM_IRQ_OFFSET + cpm_vec, dev_id);
		return;
	}

	if (cpm_vecs[cpm_vec].handler != 0)
		printk(KERN_INFO "CPM interrupt %x replacing %x\n",
			(uint)handler, (uint)cpm_vecs[cpm_vec].handler);
	cpm_vecs[cpm_vec].handler = handler;
	cpm_vecs[cpm_vec].dev_id = dev_id;

	if ((err = request_irq(CPM_IRQ_OFFSET + cpm_vec, cpm_handler_helper,
				0, cpm_int_name[cpm_vec], dev_id)))
		printk(KERN_ERR "request_irq() returned %d for CPM vector %d\n",
			err, cpm_vec);
}
/* Free a CPM interrupt handler.
 * This routine accepts a CPM interrupt vector in the range 0 to 31.
 * This routine is retained for backward compatibility.
 */
void
cpm_free_handler(int cpm_vec)
{
	/* Release the Linux IRQ and forget the handler. */
	free_irq(CPM_IRQ_OFFSET + cpm_vec, cpm_vecs[cpm_vec].dev_id);

	cpm_vecs[cpm_vec].handler = NULL;
	cpm_vecs[cpm_vec].dev_id = NULL;
}
/* We also own one page of host buffer space for the allocation of
 * UART "fifos" and the like.
 */
uint
m8xx_cpm_hostalloc(uint size)
{
	uint	retloc;

	if (host_buffer == 0)
		alloc_host_memory();

	if ((host_buffer + size) >= host_end)
		return(0);

	retloc = host_buffer;
	host_buffer += size;

	return(retloc);
}
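/* Illustration only (not part of the original source): a serial driver
 * might carve a small transmit "fifo" out of the shared host page:
 *
 *	uint fifo = m8xx_cpm_hostalloc(64);
 *	if (fifo == 0)
 *		return -ENOMEM;		(the single page is exhausted)
 *
 * Allocations are handed out linearly; there is no corresponding free.
 */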
/* Set a baud rate generator.  This needs lots of work.  There are
 * four BRGs, any of which can be wired to any channel.
 * The internal baud rate clock is the system clock divided by 16.
 * This assumes the baud rate is 16x oversampled by the UART.
 */
#define BRG_INT_CLK		(((bd_t *)__res)->bi_intfreq)
#define BRG_UART_CLK		(BRG_INT_CLK/16)
#define BRG_UART_CLK_DIV16	(BRG_UART_CLK/16)

void
cpm_setbrg(uint brg, uint rate)
{
	volatile uint	*bp;

	/* This is good enough to get SMCs running.....
	 */
	bp = (uint *)&cpmp->cp_brgc1;
	bp += brg;

	/* The BRG has a 12-bit counter.  For really slow baud rates (or
	 * really fast processors), we may have to further divide by 16.
	 */
	if (((BRG_UART_CLK / rate) - 1) < 4096)
		*bp = (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN;
	else
		*bp = (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
						CPM_BRG_EN | CPM_BRG_DIV16;
}
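/* Worked example (illustrative; assumes bi_intfreq is 48 MHz, a value
 * not stated anywhere in this file):
 * BRG_UART_CLK = 48000000 / 16 = 3000000.  For a 9600 baud SMC,
 * 3000000 / 9600 - 1 = 311, which fits the 12-bit counter, so the BRG
 * is programmed with (311 << 1) | CPM_BRG_EN.  For 110 baud,
 * 3000000 / 110 - 1 = 27271 does not fit, so the divide-by-16 path is
 * taken: 187500 / 110 - 1 = 1703, programmed with CPM_BRG_DIV16 also set.
 */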
/*
 * dpalloc / dpfree bits.
 */
static spinlock_t cpm_dpmem_lock;
/*
 * 16 blocks should be enough to satisfy all requests
 * until the memory subsystem goes up...
 */
static rh_block_t cpm_boot_dpmem_rh_block[16];
static rh_info_t cpm_dpmem_info;

#define CPM_DPMEM_ALIGNMENT	8

void m8xx_cpm_dpinit(void)
{
	spin_lock_init(&cpm_dpmem_lock);

	/* Initialize the info header */
	rh_init(&cpm_dpmem_info, CPM_DPMEM_ALIGNMENT,
			sizeof(cpm_boot_dpmem_rh_block) /
			sizeof(cpm_boot_dpmem_rh_block[0]),
			cpm_boot_dpmem_rh_block);

	/*
	 * Attach the usable dpmem area.
	 * XXX: This is actually crap.  CPM_DATAONLY_BASE and
	 * CPM_DATAONLY_SIZE are a subset of the available dpram.  It varies
	 * with the processor and the microcode patches applied / activated.
	 * But the following should be at least safe.
	 */
	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
}
/*
 * Allocate the requested size worth of DP memory.
 * This function returns an offset into the DPRAM area.
 * Use cpm_dpram_addr() to get the virtual address of the area.
 */
uint cpm_dpalloc(uint size, uint align)
{
	void *start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return (uint)start;
}
EXPORT_SYMBOL(cpm_dpalloc);
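/* Illustration only (not part of the original source): allocating space
 * for two buffer descriptors and mapping the returned offset to a
 * virtual address (cbd_t comes from <asm/commproc.h>):
 *
 *	uint dp_off = cpm_dpalloc(2 * sizeof(cbd_t), 8);
 *	cbd_t *bdp = (cbd_t *)cpm_dpram_addr(dp_off);
 *
 * The offset can later be handed back with cpm_dpfree(dp_off).
 */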
int cpm_dpfree(uint offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	ret = rh_free(&cpm_dpmem_info, (void *)offset);
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return ret;
}
EXPORT_SYMBOL(cpm_dpfree);
uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
{
	void *start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
	start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return (uint)start;
}
EXPORT_SYMBOL(cpm_dpalloc_fixed);
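/* Illustration only (not part of the original source): reserving the
 * SMC1 parameter RAM at its fixed offset so that later cpm_dpalloc()
 * calls cannot hand it out (PROFF_SMC1 and smc_uart_t are assumed to
 * come from <asm/commproc.h>):
 *
 *	cpm_dpalloc_fixed(PROFF_SMC1, sizeof(smc_uart_t), 8);
 */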
void cpm_dpdump(void)
{
	rh_dump(&cpm_dpmem_info);
}
EXPORT_SYMBOL(cpm_dpdump);
void *cpm_dpram_addr(uint offset)
{
	return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
}
EXPORT_SYMBOL(cpm_dpram_addr);