/*
 * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
 *
 * Copyright (c) 2005 Axis Communications AB
 *
 * Author: Mikael Starvik
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <dma.h>
#include <pinmux.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/sser_defs.h>
#include <hwregs/dma_defs.h>
#include <hwregs/dma.h>
#include <hwregs/intr_vect_defs.h>
#include <hwregs/intr_vect.h>
#include <hwregs/reg_map.h>
#include <asm/sync_serial.h>
/* The receiver is a bit tricky because of the continuous stream of data. */
/*                                                                        */
/* Three DMA descriptors are linked together. Each DMA descriptor is      */
/* responsible for port->bufchunk of a common buffer.                     */
/*                                                                        */
/*  +---------------------------------------------+                      */
/*  |   +----------+   +----------+   +----------+ |                      */
/*  +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+                      */
/*      +----------+   +----------+   +----------+                        */
/*          |              |              |                               */
/*          v              v              v                               */
/*      +-------------------------------------+                           */
/*      |              BUFFER                 |                           */
/*      +-------------------------------------+                           */
/*      |<- data_avail ->|                                                */
/*      readp            writep                                           */
/*                                                                        */
/* If the application keeps up the pace readp will be right after writep. */
/* If the application can't keep the pace we have to throw away data.     */
/* The idea is that readp should be ready with the data pointed out by    */
/* Descr[i] when the DMA has filled in Descr[i+1].                        */
/* Otherwise we will discard                                              */
/* the rest of the data pointed out by Descr1 and set readp to the start  */
/* of Descr2.                                                             */
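/*
 * Illustrative example (numbers assumed, not taken from hardware): with
 * IN_BUFFER_SIZE = 12288, readp at offset 12000 and writep wrapped around
 * to offset 100, sync_data_avail() below reports
 * 12288 - (12000 - 100) = 388 bytes of available data.
 */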
#define SYNC_SERIAL_MAJOR 125

/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
/* words can be handled */
#define IN_BUFFER_SIZE 12288
#define IN_DESCR_SIZE 256
#define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)

#define OUT_BUFFER_SIZE 1024*8
#define NBR_OUT_DESCR 8

#define DEFAULT_FRAME_RATE 0
#define DEFAULT_WORD_RATE 7
/* NOTE: Enabling some debug will likely cause overrun or underrun,
 * especially if manual mode is used.
 */
#define DEBUG(x)
#define DEBUGREAD(x)
#define DEBUGWRITE(x)
#define DEBUGPOLL(x)
#define DEBUGRXINT(x)
#define DEBUGTXINT(x)
#define DEBUGTRDMA(x)
#define DEBUGOUTBUF(x)
typedef struct sync_port
{
	reg_scope_instances regi_sser;
	reg_scope_instances regi_dmain;
	reg_scope_instances regi_dmaout;

	char started;	/* 1 if port has been started */
	char port_nbr;	/* Port 0 or 1 */
	char busy;	/* 1 if port is busy */
	char enabled;	/* 1 if port is enabled */
	char use_dma;	/* 1 if port uses dma */
	char tr_running;

	char init_irqs;
	int output;
	int input;

	/* Next byte to be read by application */
	volatile unsigned char *volatile readp;
	/* Next byte to be written by etrax */
	volatile unsigned char *volatile writep;

	unsigned int in_buffer_size;
	unsigned int inbufchunk;
	unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
	unsigned char in_buffer[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
	unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
	struct dma_descr_data *next_rx_desc;
	struct dma_descr_data *prev_rx_desc;

	/* Pointer to the first available descriptor in the ring,
	 * unless active_tr_descr == catch_tr_descr and a dma
	 * transfer is active */
	struct dma_descr_data *active_tr_descr;

	/* Pointer to the first allocated descriptor in the ring */
	struct dma_descr_data *catch_tr_descr;

	/* Pointer to the descriptor with the current end-of-list */
	struct dma_descr_data *prev_tr_descr;
	int full;

	/* Pointer to the first byte being read by DMA
	 * or current position in out_buffer if not using DMA. */
	unsigned char *out_rd_ptr;

	/* Number of bytes currently locked for being read by DMA */
	int out_buf_count;

	dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
	dma_descr_context in_context __attribute__ ((__aligned__(32)));
	dma_descr_data out_descr[NBR_OUT_DESCR]
		__attribute__ ((__aligned__(16)));
	dma_descr_context out_context __attribute__ ((__aligned__(32)));
	wait_queue_head_t out_wait_q;
	wait_queue_head_t in_wait_q;

	spinlock_t lock;
} sync_port;
static DEFINE_MUTEX(sync_serial_mutex);
static int etrax_sync_serial_init(void);
static void initialize_port(int portnbr);
static inline int sync_data_avail(struct sync_port *port);

static int sync_serial_open(struct inode *, struct file *);
static int sync_serial_release(struct inode *, struct file *);
static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);

static long sync_serial_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg);
static ssize_t sync_serial_write(struct file *file, const char *buf,
				 size_t count, loff_t *ppos);
static ssize_t sync_serial_read(struct file *file, char *buf,
				size_t count, loff_t *ppos);
#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
	defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
	defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
#define SYNC_SER_DMA
#endif

static void send_word(sync_port *port);
static void start_dma_out(struct sync_port *port, const char *data, int count);
static void start_dma_in(sync_port *port);
#ifdef SYNC_SER_DMA
static irqreturn_t tr_interrupt(int irq, void *dev_id);
static irqreturn_t rx_interrupt(int irq, void *dev_id);
#endif

#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
	!defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
	!defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
#define SYNC_SER_MANUAL
#endif
#ifdef SYNC_SER_MANUAL
static irqreturn_t manual_interrupt(int irq, void *dev_id);
#endif
#ifdef CONFIG_ETRAXFS	/* ETRAX FS */
#define OUT_DMA_NBR		4
#define IN_DMA_NBR		5
#define PINMUX_SSER		pinmux_sser0
#define SYNCSER_INST		regi_sser0
#define SYNCSER_INTR_VECT	SSER0_INTR_VECT
#define OUT_DMA_INST		regi_dma4
#define IN_DMA_INST		regi_dma5
#define DMA_OUT_INTR_VECT	DMA4_INTR_VECT
#define DMA_IN_INTR_VECT	DMA5_INTR_VECT
#define REQ_DMA_SYNCSER		dma_sser0
#else			/* Artpec-3 */
#define OUT_DMA_NBR		6
#define IN_DMA_NBR		7
#define PINMUX_SSER		pinmux_sser
#define SYNCSER_INST		regi_sser
#define SYNCSER_INTR_VECT	SSER_INTR_VECT
#define OUT_DMA_INST		regi_dma6
#define IN_DMA_INST		regi_dma7
#define DMA_OUT_INTR_VECT	DMA6_INTR_VECT
#define DMA_IN_INTR_VECT	DMA7_INTR_VECT
#define REQ_DMA_SYNCSER		dma_sser
#endif
/* The ports */
static struct sync_port ports[] =
{
	{
		.regi_sser = SYNCSER_INST,
		.regi_dmaout = OUT_DMA_INST,
		.regi_dmain = IN_DMA_INST,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	},
#ifdef CONFIG_ETRAXFS
	{
		.regi_sser = regi_sser1,
		.regi_dmaout = regi_dma6,
		.regi_dmain = regi_dma7,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	},
#endif
};

#define NBR_PORTS ARRAY_SIZE(ports)
static const struct file_operations sync_serial_fops = {
	.owner		= THIS_MODULE,
	.write		= sync_serial_write,
	.read		= sync_serial_read,
	.poll		= sync_serial_poll,
	.unlocked_ioctl	= sync_serial_ioctl,
	.open		= sync_serial_open,
	.release	= sync_serial_release,
	.llseek		= noop_llseek,
};
static int __init etrax_sync_serial_init(void)
{
	ports[0].enabled = 0;
#ifdef CONFIG_ETRAXFS
	ports[1].enabled = 0;
#endif
	if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
			&sync_serial_fops) < 0) {
		printk(KERN_WARNING
			"Unable to get major for synchronous serial port\n");
		return -EBUSY;
	}

	/* Initialize Ports */
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
	if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
		printk(KERN_WARNING
			"Unable to alloc pins for synchronous serial port 0\n");
		return -EIO;
	}
	ports[0].enabled = 1;
	initialize_port(0);
#endif

#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
	if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
		printk(KERN_WARNING
			"Unable to alloc pins for synchronous serial port 1\n");
		return -EIO;
	}
	ports[1].enabled = 1;
	initialize_port(1);
#endif

#ifdef CONFIG_ETRAXFS
	printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
#else
	printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
#endif
	return 0;
}
static void __init initialize_port(int portnbr)
{
	int __attribute__((unused)) i;
	struct sync_port *port = &ports[portnbr];
	reg_sser_rw_cfg cfg = {0};
	reg_sser_rw_frm_cfg frm_cfg = {0};
	reg_sser_rw_tr_cfg tr_cfg = {0};
	reg_sser_rw_rec_cfg rec_cfg = {0};

	DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));

	port->port_nbr = portnbr;
	port->init_irqs = 1;

	port->out_rd_ptr = port->out_buffer;
	port->out_buf_count = 0;

	port->output = 1;
	port->input = 0;

	port->readp = port->flip;
	port->writep = port->flip;
	port->in_buffer_size = IN_BUFFER_SIZE;
	port->inbufchunk = IN_DESCR_SIZE;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
	port->prev_rx_desc->eol = 1;

	init_waitqueue_head(&port->out_wait_q);
	init_waitqueue_head(&port->in_wait_q);

	spin_lock_init(&port->lock);

	cfg.out_clk_src = regk_sser_intern_clk;
	cfg.out_clk_pol = regk_sser_pos;
	cfg.clk_od_mode = regk_sser_no;
	cfg.clk_dir = regk_sser_out;
	cfg.gate_clk = regk_sser_no;
	cfg.base_freq = regk_sser_f29_493;
	cfg.clk_div = 256;
	REG_WR(sser, port->regi_sser, rw_cfg, cfg);

	frm_cfg.wordrate = DEFAULT_WORD_RATE;
	frm_cfg.type = regk_sser_edge;
	frm_cfg.frame_pin_dir = regk_sser_out;
	frm_cfg.frame_pin_use = regk_sser_frm;
	frm_cfg.status_pin_dir = regk_sser_in;
	frm_cfg.status_pin_use = regk_sser_hold;
	frm_cfg.out_on = regk_sser_tr;
	frm_cfg.tr_delay = 1;
	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);

	tr_cfg.urun_stop = regk_sser_no;
	tr_cfg.sample_size = 7;
	tr_cfg.sh_dir = regk_sser_msbfirst;
	tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
#if 0
	tr_cfg.rate_ctrl = regk_sser_bulk;
	tr_cfg.data_pin_use = regk_sser_dout;
#else
	tr_cfg.rate_ctrl = regk_sser_iso;
	tr_cfg.data_pin_use = regk_sser_dout;
#endif
	tr_cfg.bulk_wspace = 1;
	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);

	rec_cfg.sample_size = 7;
	rec_cfg.sh_dir = regk_sser_msbfirst;
	rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
	rec_cfg.fifo_thr = regk_sser_inf;
	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);

#ifdef SYNC_SER_DMA
	/* Setup the descriptor ring for dma out/transmit. */
	for (i = 0; i < NBR_OUT_DESCR; i++) {
		port->out_descr[i].wait = 0;
		port->out_descr[i].intr = 1;
		port->out_descr[i].eol = 0;
		port->out_descr[i].out_eop = 0;
		port->out_descr[i].next =
			(dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
	}

	/* Create a ring from the list. */
	port->out_descr[NBR_OUT_DESCR-1].next =
		(dma_descr_data *)virt_to_phys(&port->out_descr[0]);

	/* Setup context for traversing the ring. */
	port->active_tr_descr = &port->out_descr[0];
	port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
	port->catch_tr_descr = &port->out_descr[0];
#endif
}
static inline int sync_data_avail(struct sync_port *port)
{
	int avail;
	unsigned char *start;
	unsigned char *end;

	start = (unsigned char *)port->readp; /* cast away volatile */
	end = (unsigned char *)port->writep;  /* cast away volatile */
	/* 0123456789  0123456789
	 *  -----      -    -----
	 *  ^rp  ^wp    ^wp ^rp
	 */

	if (end >= start)
		avail = end - start;
	else
		avail = port->in_buffer_size - (start - end);
	return avail;
}
static inline int sync_data_avail_to_end(struct sync_port *port)
{
	int avail;
	unsigned char *start;
	unsigned char *end;

	start = (unsigned char *)port->readp; /* cast away volatile */
	end = (unsigned char *)port->writep;  /* cast away volatile */
	/* 0123456789  0123456789
	 *  -----           -----
	 *  ^rp  ^wp    ^wp ^rp
	 */

	if (end >= start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;
	return avail;
}
static int sync_serial_open(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);
	int ret = -EBUSY;
	sync_port *port;
	reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
	reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};

	mutex_lock(&sync_serial_mutex);
	DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
	{
		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
		ret = -ENODEV;
		goto out;
	}
	port = &ports[dev];
	/* Allow open this device twice (assuming one reader and one writer) */
	if (port->busy == 2)
	{
		DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
		goto out;
	}

	if (port->init_irqs) {
		if (port->use_dma) {
			if (port == &ports[0]) {
#ifdef SYNC_SER_DMA
				if (request_irq(DMA_OUT_INTR_VECT,
						tr_interrupt,
						0,
						"synchronous serial 0 dma tr",
						&ports[0])) {
					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
					goto out;
				} else if (request_irq(DMA_IN_INTR_VECT,
						rx_interrupt,
						0,
						"synchronous serial 0 dma rx",
						&ports[0])) {
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
					goto out;
				} else if (crisv32_request_dma(OUT_DMA_NBR,
						"synchronous serial 0 dma tr",
						DMA_VERBOSE_ON_ERROR,
						0,
						REQ_DMA_SYNCSER)) {
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					free_irq(DMA_IN_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
					goto out;
				} else if (crisv32_request_dma(IN_DMA_NBR,
						"synchronous serial 0 dma rec",
						DMA_VERBOSE_ON_ERROR,
						0,
						REQ_DMA_SYNCSER)) {
					crisv32_free_dma(OUT_DMA_NBR);
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					free_irq(DMA_IN_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
					goto out;
				}
#endif
			}
#ifdef CONFIG_ETRAXFS
			else if (port == &ports[1]) {
#ifdef SYNC_SER_DMA
				if (request_irq(DMA6_INTR_VECT,
						tr_interrupt,
						0,
						"synchronous serial 1 dma tr",
						&ports[1])) {
					printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
					goto out;
				} else if (request_irq(DMA7_INTR_VECT,
						rx_interrupt,
						0,
						"synchronous serial 1 dma rx",
						&ports[1])) {
					free_irq(DMA6_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
					goto out;
				} else if (crisv32_request_dma(
						SYNC_SER1_TX_DMA_NBR,
						"synchronous serial 1 dma tr",
						DMA_VERBOSE_ON_ERROR,
						0,
						dma_sser1)) {
					free_irq(DMA6_INTR_VECT, &ports[1]);
					free_irq(DMA7_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
					goto out;
				} else if (crisv32_request_dma(
						SYNC_SER1_RX_DMA_NBR,
						"synchronous serial 1 dma rec",
						DMA_VERBOSE_ON_ERROR,
						0,
						dma_sser1)) {
					crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
					free_irq(DMA6_INTR_VECT, &ports[1]);
					free_irq(DMA7_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
					goto out;
				}
#endif
			}
#endif
			/* Enable DMAs */
			REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
			REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
			/* Enable DMA IRQs */
			REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
			REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
			/* Set up wordsize = 1 for DMAs. */
			DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
			DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);

			start_dma_in(port);
			port->init_irqs = 0;
		} else { /* !port->use_dma */
#ifdef SYNC_SER_MANUAL
			if (port == &ports[0]) {
				if (request_irq(SYNCSER_INTR_VECT,
						manual_interrupt,
						0,
						"synchronous serial manual irq",
						&ports[0])) {
					printk("Can't allocate sync serial manual irq");
					goto out;
				}
			}
#ifdef CONFIG_ETRAXFS
			else if (port == &ports[1]) {
				if (request_irq(SSER1_INTR_VECT,
						manual_interrupt,
						0,
						"synchronous serial manual irq",
						&ports[1])) {
					printk(KERN_CRIT "Can't allocate sync serial manual irq");
					goto out;
				}
			}
#endif
			port->init_irqs = 0;
#else
			panic("sync_serial: Manual mode not supported.\n");
#endif /* SYNC_SER_MANUAL */
		}
	} /* port->init_irqs */

	port->busy++;
	ret = 0;
out:
	mutex_unlock(&sync_serial_mutex);
	return ret;
}
static int sync_serial_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);
	sync_port *port;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
	{
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];
	if (port->busy)
		port->busy--;
	if (!port->busy)
		/* XXX */ ;
	return 0;
}
static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
	int dev = iminor(file_inode(file));
	unsigned int mask = 0;
	sync_port *port;
	DEBUGPOLL(static unsigned int prev_mask = 0;);

	port = &ports[dev];

	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg =
			REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	poll_wait(file, &port->out_wait_q, wait);
	poll_wait(file, &port->in_wait_q, wait);

	/* No active transfer, descriptors are available */
	if (port->output && !port->tr_running)
		mask |= POLLOUT | POLLWRNORM;

	/* Descriptor and buffer space available. */
	if (port->output &&
	    port->active_tr_descr != port->catch_tr_descr &&
	    port->out_buf_count < OUT_BUFFER_SIZE)
		mask |= POLLOUT | POLLWRNORM;

	/* At least an inbufchunk of data */
	if (port->input && sync_data_avail(port) >= port->inbufchunk)
		mask |= POLLIN | POLLRDNORM;

	DEBUGPOLL(if (mask != prev_mask)
		printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
			mask & POLLOUT ? "POLLOUT" : "",
			mask & POLLIN ? "POLLIN" : "");
		prev_mask = mask;
	);
	return mask;
}
static int sync_serial_ioctl_unlocked(struct file *file,
		  unsigned int cmd, unsigned long arg)
{
	int return_val = 0;
	int dma_w_size = regk_dma_set_w_size1;
	int dev = iminor(file_inode(file));
	sync_port *port;
	reg_sser_rw_tr_cfg tr_cfg;
	reg_sser_rw_rec_cfg rec_cfg;
	reg_sser_rw_frm_cfg frm_cfg;
	reg_sser_rw_cfg gen_cfg;
	reg_sser_rw_intr_mask intr_mask;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
	{
		DEBUG(printk("Invalid minor %d\n", dev));
		return -1;
	}
	port = &ports[dev];
	spin_lock_irq(&port->lock);

	tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
	rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
	frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
	gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
	intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
	switch (cmd)
	{
	case SSP_SPEED:
		if (GET_SPEED(arg) == CODEC)
		{
			unsigned int freq;

			gen_cfg.base_freq = regk_sser_f32;

			/* Clock divider will internally be
			 * gen_cfg.clk_div + 1.
			 */

			freq = GET_FREQ(arg);
			switch (freq) {
			case FREQ_32kHz:
			case FREQ_64kHz:
			case FREQ_128kHz:
			case FREQ_256kHz:
				gen_cfg.clk_div = 125 *
					(1 << (freq - FREQ_256kHz)) - 1;
				break;
			case FREQ_512kHz:
				gen_cfg.clk_div = 62;
				break;
			case FREQ_1MHz:
			case FREQ_2MHz:
			case FREQ_4MHz:
				gen_cfg.clk_div = 8 * (1 << freq) - 1;
				break;
			}
		} else {
			gen_cfg.base_freq = regk_sser_f29_493;
			switch (GET_SPEED(arg)) {
			case SSP150:
				gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
				break;
			case SSP300:
				gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
				break;
			case SSP600:
				gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
				break;
			case SSP1200:
				gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
				break;
			case SSP2400:
				gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
				break;
			case SSP4800:
				gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
				break;
			case SSP9600:
				gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
				break;
			case SSP19200:
				gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
				break;
			case SSP28800:
				gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
				break;
			case SSP57600:
				gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
				break;
			case SSP115200:
				gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
				break;
			case SSP230400:
				gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
				break;
			case SSP460800:
				gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
				break;
			case SSP921600:
				gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
				break;
			case SSP3125000:
				gen_cfg.base_freq = regk_sser_f100;
				gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
				break;
			}
		}
		frm_cfg.wordrate = GET_WORD_RATE(arg);

		break;
	case SSP_MODE:
		switch (arg)
		{
		case MASTER_OUTPUT:
			port->output = 1;
			port->input = 0;
			frm_cfg.out_on = regk_sser_tr;
			frm_cfg.frame_pin_dir = regk_sser_out;
			gen_cfg.clk_dir = regk_sser_out;
			break;
		case SLAVE_OUTPUT:
			port->output = 1;
			port->input = 0;
			frm_cfg.frame_pin_dir = regk_sser_in;
			gen_cfg.clk_dir = regk_sser_in;
			break;
		case MASTER_INPUT:
			port->output = 0;
			port->input = 1;
			frm_cfg.frame_pin_dir = regk_sser_out;
			frm_cfg.out_on = regk_sser_intern_tb;
			gen_cfg.clk_dir = regk_sser_out;
			break;
		case SLAVE_INPUT:
			port->output = 0;
			port->input = 1;
			frm_cfg.frame_pin_dir = regk_sser_in;
			gen_cfg.clk_dir = regk_sser_in;
			break;
		case MASTER_BIDIR:
			port->output = 1;
			port->input = 1;
			frm_cfg.frame_pin_dir = regk_sser_out;
			frm_cfg.out_on = regk_sser_intern_tb;
			gen_cfg.clk_dir = regk_sser_out;
			break;
		case SLAVE_BIDIR:
			port->output = 1;
			port->input = 1;
			frm_cfg.frame_pin_dir = regk_sser_in;
			gen_cfg.clk_dir = regk_sser_in;
			break;
		default:
			spin_unlock_irq(&port->lock);
			return -EINVAL;
		}
		if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
			intr_mask.rdav = regk_sser_yes;
		break;
	case SSP_FRAME_SYNC:
		if (arg & NORMAL_SYNC) {
			frm_cfg.rec_delay = 1;
			frm_cfg.tr_delay = 1;
		}
		else if (arg & EARLY_SYNC)
			frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
		else if (arg & SECOND_WORD_SYNC) {
			frm_cfg.rec_delay = 7;
			frm_cfg.tr_delay = 1;
		}

		tr_cfg.bulk_wspace = frm_cfg.tr_delay;
		frm_cfg.early_wend = regk_sser_yes;
		if (arg & BIT_SYNC)
			frm_cfg.type = regk_sser_edge;
		else if (arg & WORD_SYNC)
			frm_cfg.type = regk_sser_level;
		else if (arg & EXTENDED_SYNC)
			frm_cfg.early_wend = regk_sser_no;

		if (arg & SYNC_ON)
			frm_cfg.frame_pin_use = regk_sser_frm;
		else if (arg & SYNC_OFF)
			frm_cfg.frame_pin_use = regk_sser_gio0;

		dma_w_size = regk_dma_set_w_size2;
		if (arg & WORD_SIZE_8) {
			rec_cfg.sample_size = tr_cfg.sample_size = 7;
			dma_w_size = regk_dma_set_w_size1;
		} else if (arg & WORD_SIZE_12)
			rec_cfg.sample_size = tr_cfg.sample_size = 11;
		else if (arg & WORD_SIZE_16)
			rec_cfg.sample_size = tr_cfg.sample_size = 15;
		else if (arg & WORD_SIZE_24)
			rec_cfg.sample_size = tr_cfg.sample_size = 23;
		else if (arg & WORD_SIZE_32)
			rec_cfg.sample_size = tr_cfg.sample_size = 31;

		if (arg & BIT_ORDER_MSB)
			rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
		else if (arg & BIT_ORDER_LSB)
			rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;

		if (arg & FLOW_CONTROL_ENABLE) {
			frm_cfg.status_pin_use = regk_sser_frm;
			rec_cfg.fifo_thr = regk_sser_thr16;
		} else if (arg & FLOW_CONTROL_DISABLE) {
			frm_cfg.status_pin_use = regk_sser_gio0;
			rec_cfg.fifo_thr = regk_sser_inf;
		}
		if (arg & CLOCK_NOT_GATED)
			gen_cfg.gate_clk = regk_sser_no;
		else if (arg & CLOCK_GATED)
			gen_cfg.gate_clk = regk_sser_yes;

		break;
	case SSP_IPOLARITY:
		/* NOTE!! negedge is considered NORMAL */
		if (arg & CLOCK_NORMAL)
			rec_cfg.clk_pol = regk_sser_neg;
		else if (arg & CLOCK_INVERT)
			rec_cfg.clk_pol = regk_sser_pos;

		if (arg & FRAME_NORMAL)
			frm_cfg.level = regk_sser_pos_hi;
		else if (arg & FRAME_INVERT)
			frm_cfg.level = regk_sser_neg_lo;

		if (arg & STATUS_NORMAL)
			gen_cfg.hold_pol = regk_sser_pos;
		else if (arg & STATUS_INVERT)
			gen_cfg.hold_pol = regk_sser_neg;
		break;
	case SSP_OPOLARITY:
		if (arg & CLOCK_NORMAL)
			gen_cfg.out_clk_pol = regk_sser_pos;
		else if (arg & CLOCK_INVERT)
			gen_cfg.out_clk_pol = regk_sser_neg;

		if (arg & FRAME_NORMAL)
			frm_cfg.level = regk_sser_pos_hi;
		else if (arg & FRAME_INVERT)
			frm_cfg.level = regk_sser_neg_lo;

		if (arg & STATUS_NORMAL)
			gen_cfg.hold_pol = regk_sser_pos;
		else if (arg & STATUS_INVERT)
			gen_cfg.hold_pol = regk_sser_neg;
		break;
	case SSP_SPI:
		rec_cfg.fifo_thr = regk_sser_inf;
		rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
		rec_cfg.sample_size = tr_cfg.sample_size = 7;
		frm_cfg.frame_pin_use = regk_sser_frm;
		frm_cfg.type = regk_sser_level;
		frm_cfg.tr_delay = 1;
		frm_cfg.level = regk_sser_neg_lo;
		if (arg & SPI_SLAVE)
		{
			rec_cfg.clk_pol = regk_sser_neg;
			gen_cfg.clk_dir = regk_sser_in;
			port->input = 1;
			port->output = 0;
		}
		else
		{
			gen_cfg.out_clk_pol = regk_sser_pos;
			port->input = 0;
			port->output = 1;
			gen_cfg.clk_dir = regk_sser_out;
		}
		break;
	case SSP_INBUFCHUNK:
		break;
	default:
		return_val = -1;
	}
	if (port->started) {
		rec_cfg.rec_en = port->input;
		gen_cfg.en = (port->output | port->input);
	}

	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
	REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
	REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);

	if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
			WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
		int en = gen_cfg.en;
		gen_cfg.en = 0;
		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
		/* ##### Should DMA be stopped before we change dma size? */
		DMA_WR_CMD(port->regi_dmain, dma_w_size);
		DMA_WR_CMD(port->regi_dmaout, dma_w_size);
		gen_cfg.en = en;
		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
	}

	spin_unlock_irq(&port->lock);
	return return_val;
}
static long sync_serial_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&sync_serial_mutex);
	ret = sync_serial_ioctl_unlocked(file, cmd, arg);
	mutex_unlock(&sync_serial_mutex);

	return ret;
}
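/*
 * Minimal user-space usage sketch (illustrative only: the device node name
 * and minor are assumptions; the SSP_* ioctl codes and flag values come
 * from <asm/sync_serial.h>):
 *
 *	int fd = open("/dev/syncser0", O_RDWR);   // char dev, major 125
 *	ioctl(fd, SSP_MODE, MASTER_OUTPUT);
 *	ioctl(fd, SSP_FRAME_SYNC,
 *	      NORMAL_SYNC | WORD_SIZE_8 | BIT_ORDER_MSB | SYNC_ON);
 *	write(fd, buf, len);                      // may be truncated, check rc
 *
 * SSP_SPEED takes a packed argument that is decoded here with GET_SPEED(),
 * GET_FREQ() and GET_WORD_RATE(), so it should be built with the matching
 * macros from the same header.
 */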
/* NOTE: sync_serial_write does not support concurrency */
static ssize_t sync_serial_write(struct file *file, const char *buf,
				 size_t count, loff_t *ppos)
{
	int dev = iminor(file_inode(file));
	DECLARE_WAITQUEUE(wait, current);
	struct sync_port *port;
	int trunc_count;
	unsigned long flags;
	int bytes_free;
	int out_buf_count;

	unsigned char *rd_ptr;		/* First allocated byte in the buffer */
	unsigned char *wr_ptr;		/* First free byte in the buffer */
	unsigned char *buf_stop_ptr;	/* Last byte + 1 */

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	/* |<-         OUT_BUFFER_SIZE          ->|
	 *           |<- out_buf_count ->|
	 *                               |<- trunc_count ->| ...->|
	 *  ______________________________________________________
	 * |  free   |   data            | free                   |
	 * |_________|___________________|________________________|
	 *           ^ rd_ptr            ^ wr_ptr
	 */
	DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
			  port->port_nbr, count, port->active_tr_descr,
			  port->catch_tr_descr));

	/* Read variables that may be updated by interrupts */
	spin_lock_irqsave(&port->lock, flags);
	rd_ptr = port->out_rd_ptr;
	out_buf_count = port->out_buf_count;
	spin_unlock_irqrestore(&port->lock, flags);

	/* Check if resources are available */
	if (port->tr_running &&
	    ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
	     out_buf_count >= OUT_BUFFER_SIZE)) {
		DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
		return -EAGAIN;
	}

	buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;

	/* Determine pointer to the first free byte, before copying. */
	wr_ptr = rd_ptr + out_buf_count;
	if (wr_ptr >= buf_stop_ptr)
		wr_ptr -= OUT_BUFFER_SIZE;

	/* If we wrap the ring buffer, let the user space program handle it by
	 * truncating the data. This could be more elegant, small buffer
	 * fragments may occur.
	 */
	bytes_free = OUT_BUFFER_SIZE - out_buf_count;
	if (wr_ptr + bytes_free > buf_stop_ptr)
		bytes_free = buf_stop_ptr - wr_ptr;
	trunc_count = (count < bytes_free) ? count : bytes_free;

	if (copy_from_user(wr_ptr, buf, trunc_count))
		return -EFAULT;

	DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d   %p %p %p\n",
			   out_buf_count, trunc_count,
			   port->out_buf_count, port->out_buffer,
			   wr_ptr, buf_stop_ptr));

	/* Make sure transmitter/receiver is running */
	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Setup wait if blocking */
	if (!(file->f_flags & O_NONBLOCK)) {
		add_wait_queue(&port->out_wait_q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	spin_lock_irqsave(&port->lock, flags);
	port->out_buf_count += trunc_count;
	if (port->use_dma) {
		start_dma_out(port, wr_ptr, trunc_count);
	} else if (!port->tr_running) {
		reg_sser_rw_intr_mask intr_mask;
		intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
		/* Start sender by writing data */
		send_word(port);
		/* and enable transmitter ready IRQ */
		intr_mask.trdy = 1;
		REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
	}
	spin_unlock_irqrestore(&port->lock, flags);

	/* Exit if non blocking */
	if (file->f_flags & O_NONBLOCK) {
		DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
				  port->port_nbr, trunc_count,
				  REG_RD_INT(dma, port->regi_dmaout, r_intr)));
		return trunc_count;
	}

	schedule();
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->out_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
			  port->port_nbr, trunc_count));
	return trunc_count;
}
static ssize_t sync_serial_read(struct file *file, char *buf,
				size_t count, loff_t *ppos)
{
	int dev = iminor(file_inode(file));
	int avail;
	sync_port *port;
	unsigned char *start;
	unsigned char *end;
	unsigned long flags;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
	{
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count,
			 port->readp - port->flip,
			 port->writep - port->flip, port->in_buffer_size));

	if (!port->started)
	{
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		tr_cfg.tr_en = regk_sser_yes;
		rec_cfg.rec_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Calculate number of available bytes */
	/* Save pointers to avoid that they are modified by interrupt */
	spin_lock_irqsave(&port->lock, flags);
	start = (unsigned char *)port->readp; /* cast away volatile */
	end = (unsigned char *)port->writep;  /* cast away volatile */
	spin_unlock_irqrestore(&port->lock, flags);
	while ((start == end) && !port->full) /* No data */
	{
		DEBUGREAD(printk(KERN_DEBUG "&"));
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		wait_event_interruptible(port->in_wait_q,
					 !(start == end && !port->full));
		if (signal_pending(current))
			return -EINTR;

		spin_lock_irqsave(&port->lock, flags);
		start = (unsigned char *)port->readp; /* cast away volatile */
		end = (unsigned char *)port->writep;  /* cast away volatile */
		spin_unlock_irqrestore(&port->lock, flags);
	}

	/* Lazy read, never return wrapped data. */
	if (port->full)
		avail = port->in_buffer_size;
	else if (end > start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;

	count = count > avail ? avail : count;
	if (copy_to_user(buf, start, count))
		return -EFAULT;
	/* Disable interrupts while updating readp */
	spin_lock_irqsave(&port->lock, flags);
	port->readp += count;
	if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
		port->readp = port->flip;
	port->full = 0;
	spin_unlock_irqrestore(&port->lock, flags);
	DEBUGREAD(printk("r %d\n", count));
	return count;
}
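/* Push one word from out_buffer into the transmitter data register.
 * The word width is selected by tr_cfg.sample_size; out_rd_ptr is advanced
 * and out_buf_count decremented accordingly, wrapping at the end of
 * out_buffer. Used by the non-DMA (manual) transmit path. */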
static void send_word(sync_port *port)
{
	reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
	reg_sser_rw_tr_data tr_data = {0};

	switch (tr_cfg.sample_size)
	{
	case 8:
		port->out_buf_count--;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 12:
	{
		int data = (*port->out_rd_ptr++) << 8;
		data |= *port->out_rd_ptr++;
		port->out_buf_count -= 2;
		tr_data.data = data;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
	}
	break;
	case 16:
		port->out_buf_count -= 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 24:
		port->out_buf_count -= 3;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 32:
		port->out_buf_count -= 4;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	}
}
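/* Queue "count" bytes starting at "data" (inside out_buffer) on the TX
 * descriptor ring: the active descriptor gets the buffer and becomes the
 * new end-of-list, the previous EOL is cleared, and the output DMA channel
 * is either started with a fresh context or told to continue. Called with
 * port->lock held from sync_serial_write(). */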
static void start_dma_out(struct sync_port *port,
			  const char *data, int count)
{
	port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
	port->active_tr_descr->after = port->active_tr_descr->buf + count;
	port->active_tr_descr->intr = 1;

	port->active_tr_descr->eol = 1;
	port->prev_tr_descr->eol = 0;

	DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
		port->prev_tr_descr, port->active_tr_descr));
	port->prev_tr_descr = port->active_tr_descr;
	port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);

	if (!port->tr_running) {
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
			rw_tr_cfg);

		port->out_context.next = 0;
		port->out_context.saved_data =
			(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
		port->out_context.saved_data_buf = port->prev_tr_descr->buf;

		DMA_START_CONTEXT(port->regi_dmaout,
				  virt_to_phys((char *)&port->out_context));

		tr_cfg.tr_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
	} else {
		DMA_CONTINUE_DATA(port->regi_dmaout);
		DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
	}

	port->tr_running = 1;
}
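/* Set up the receive descriptor ring: NBR_IN_DESCR descriptors, each
 * covering inbufchunk bytes of in_buffer, linked into a ring with EOL on
 * the last one, then start the input DMA context. writep is reset to the
 * start of the flip buffer. */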
static void start_dma_in(sync_port *port)
{
	int i;
	char *buf;
	port->writep = port->flip;

	if (port->writep > port->flip + port->in_buffer_size) {
		panic("Offset too large in sync serial driver\n");
		return;
	}
	buf = (char *)virt_to_phys(port->in_buffer);
	for (i = 0; i < NBR_IN_DESCR; i++) {
		port->in_descr[i].buf = buf;
		port->in_descr[i].after = buf + port->inbufchunk;
		port->in_descr[i].intr = 1;
		port->in_descr[i].next =
			(dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
		port->in_descr[i].buf = buf;
		buf += port->inbufchunk;
	}
	/* Link the last descriptor to the first */
	port->in_descr[i-1].next =
		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
	port->in_descr[i-1].eol = regk_sser_yes;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
	port->in_context.saved_data =
		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
	port->in_context.saved_data_buf = port->in_descr[0].buf;
	DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
}
#ifdef SYNC_SER_DMA
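/* TX DMA interrupt: for each port with a pending output interrupt, account
 * for the bytes of every completed descriptor (stepping catch_tr_descr and
 * out_rd_ptr forward). When the DMA has stopped at end-of-list, the
 * transmitter is disabled and tr_running cleared. Writers sleeping in
 * sync_serial_write() are woken in both cases. */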
static irqreturn_t tr_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
	reg_dma_rw_stat stat;
	int i;
	int found = 0;
	int stop_sser = 0;

	for (i = 0; i < NBR_PORTS; i++) {
		sync_port *port = &ports[i];
		if (!port->enabled || !port->use_dma)
			continue;

		/* IRQ active for the port? */
		masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
		if (!masked.data)
			continue;

		found = 1;

		/* Check if we should stop the DMA transfer */
		stat = REG_RD(dma, port->regi_dmaout, rw_stat);
		if (stat.list_state == regk_dma_data_at_eol)
			stop_sser = 1;

		/* Clear IRQ */
		REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);

		if (!stop_sser) {
			/* The DMA has completed a descriptor, EOL was not
			 * encountered, so step relevant descriptor and
			 * datapointers forward. */
			int sent;
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
					  "in descr %p (ac: %p)\n",
					  port->out_buf_count, sent,
					  port->out_buf_count - sent,
					  port->catch_tr_descr,
					  port->active_tr_descr););
			port->out_buf_count -= sent;
			port->catch_tr_descr =
				phys_to_virt((int) port->catch_tr_descr->next);
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->buf);
		} else {
			int i, sent;
			/* EOL handler.
			 * Note that if an EOL was encountered during the irq
			 * locked section of sync_ser_write the DMA will be
			 * restarted and the eol flag will be cleared.
			 * The remaining descriptors will be traversed by
			 * the descriptor interrupts as usual.
			 */
			i = 0;
			while (!port->catch_tr_descr->eol) {
				sent = port->catch_tr_descr->after -
					port->catch_tr_descr->buf;
				DEBUGOUTBUF(printk(KERN_DEBUG
					"traversing descr %p -%d (%d)\n",
					port->catch_tr_descr,
					sent,
					port->out_buf_count));
				port->out_buf_count -= sent;
				port->catch_tr_descr = phys_to_virt(
					(int)port->catch_tr_descr->next);
				i++;
				if (i >= NBR_OUT_DESCR) {
					/* TODO: Reset and recover */
					panic("sync_serial: missing eol");
				}
			}
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGOUTBUF(printk(KERN_DEBUG
				"eol at descr %p -%d (%d)\n",
				port->catch_tr_descr,
				sent,
				port->out_buf_count));

			port->out_buf_count -= sent;

			/* Update read pointer to first free byte, we
			 * may already be writing data there. */
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->after);
			if (port->out_rd_ptr > port->out_buffer +
					OUT_BUFFER_SIZE)
				port->out_rd_ptr = port->out_buffer;

			reg_sser_rw_tr_cfg tr_cfg =
				REG_RD(sser, port->regi_sser, rw_tr_cfg);
			DEBUGTXINT(printk(KERN_DEBUG
				"tr_int DMA stop %d, set catch @ %p\n",
				port->out_buf_count,
				port->active_tr_descr));
			if (port->out_buf_count != 0)
				printk(KERN_CRIT "sync_ser: buffer not "
					"empty after eol.\n");
			port->catch_tr_descr = port->active_tr_descr;
			port->tr_running = 0;
			tr_cfg.tr_en = regk_sser_no;
			REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		}
		/* wake up the waiting process */
		wake_up_interruptible(&port->out_wait_q);
	}
	return IRQ_RETVAL(found);
} /* tr_interrupt */
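/* RX DMA interrupt: for each completed input descriptor, copy inbufchunk
 * bytes from in_buffer into the flip buffer at writep (handling wrap-around
 * and marking the port full on overrun), move the EOL marker one descriptor
 * ahead, restart the DMA and wake up readers. */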
static irqreturn_t rx_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};

	int i;
	int found = 0;

	for (i = 0; i < NBR_PORTS; i++)
	{
		sync_port *port = &ports[i];

		if (!port->enabled || !port->use_dma)
			continue;

		masked = REG_RD(dma, port->regi_dmain, r_masked_intr);

		if (masked.data) /* Descriptor interrupt */
		{
			found = 1;
			while (REG_RD(dma, port->regi_dmain, rw_data) !=
			       virt_to_phys(port->next_rx_desc)) {
				DEBUGRXINT(printk(KERN_DEBUG "!"));
				if (port->writep + port->inbufchunk >
				    port->flip + port->in_buffer_size) {
					int first_size = port->flip +
						port->in_buffer_size - port->writep;
					memcpy((char *)port->writep,
					       phys_to_virt((unsigned)port->next_rx_desc->buf),
					       first_size);
					memcpy(port->flip,
					       phys_to_virt((unsigned)port->next_rx_desc->buf + first_size),
					       port->inbufchunk - first_size);
					port->writep = port->flip +
						port->inbufchunk - first_size;
				} else {
					memcpy((char *)port->writep,
					       phys_to_virt((unsigned)port->next_rx_desc->buf),
					       port->inbufchunk);
					port->writep += port->inbufchunk;
					if (port->writep >= port->flip + port->in_buffer_size)
						port->writep = port->flip;
				}
				if (port->writep == port->readp)
				{
					port->full = 1;
				}

				port->next_rx_desc->eol = 1;
				port->prev_rx_desc->eol = 0;
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 0);
				port->prev_rx_desc = port->next_rx_desc;
				port->next_rx_desc =
					phys_to_virt((unsigned)port->next_rx_desc->next);
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 1);
				/* wake up the waiting process */
				wake_up_interruptible(&port->in_wait_q);
				DMA_CONTINUE(port->regi_dmain);
				REG_WR(dma, port->regi_dmain, rw_ack_intr,
				       ack_intr);
			}
		}
	}
	return IRQ_RETVAL(found);
} /* rx_interrupt */
#endif /* SYNC_SER_DMA */
#ifdef SYNC_SER_MANUAL
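/* Non-DMA interrupt handler: on "receiver data available" one word is read
 * from r_rec_data into the flip buffer (discarding the oldest data on
 * overrun); on "transmitter ready" the next word is sent with send_word()
 * or, when out_buffer is drained, the trdy interrupt is masked and writers
 * are woken. */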
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
	int i;
	int found = 0;
	reg_sser_r_masked_intr masked;

	for (i = 0; i < NBR_PORTS; i++)
	{
		sync_port *port = &ports[i];

		if (!port->enabled || port->use_dma)
		{
			continue;
		}

		masked = REG_RD(sser, port->regi_sser, r_masked_intr);
		if (masked.rdav) /* Data received? */
		{
			reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
			reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
			found = 1;
			/* Read data */
			switch (rec_cfg.sample_size)
			{
			case 8:
				*port->writep++ = data.data & 0xff;
				break;
			case 12:
				*port->writep = (data.data & 0x0ff0) >> 4;
				*(port->writep + 1) = data.data & 0x0f;
				port->writep += 2;
				break;
			case 16:
				*(unsigned short *)port->writep = data.data;
				port->writep += 2;
				break;
			case 24:
				*(unsigned int *)port->writep = data.data;
				port->writep += 3;
				break;
			case 32:
				*(unsigned int *)port->writep = data.data;
				port->writep += 4;
				break;
			}

			if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
				port->writep = port->flip;
			if (port->writep == port->readp) {
				/* receive buffer overrun, discard oldest data */
				port->readp++;
				if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
					port->readp = port->flip;
			}
			if (sync_data_avail(port) >= port->inbufchunk)
				wake_up_interruptible(&port->in_wait_q); /* Wake up application */
		}

		if (masked.trdy) /* Transmitter ready? */
		{
			found = 1;
			if (port->out_buf_count > 0) /* More data to send */
				send_word(port);
			else /* transmission finished */
			{
				reg_sser_rw_intr_mask intr_mask;
				intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
				intr_mask.trdy = 0;
				REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
				wake_up_interruptible(&port->out_wait_q); /* Wake up application */
			}
		}
	}
	return IRQ_RETVAL(found);
}
#endif
module_init(etrax_sync_serial_init);