1 /*
2 * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
4 * Copyright (c) 2005 Axis Communications AB
6 * Author: Mikael Starvik
8 */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/major.h>
15 #include <linux/sched.h>
16 #include <linux/mutex.h>
17 #include <linux/interrupt.h>
18 #include <linux/poll.h>
19 #include <linux/init.h>
20 #include <linux/timer.h>
21 #include <linux/spinlock.h>
23 #include <asm/io.h>
24 #include <dma.h>
25 #include <pinmux.h>
26 #include <hwregs/reg_rdwr.h>
27 #include <hwregs/sser_defs.h>
28 #include <hwregs/dma_defs.h>
29 #include <hwregs/dma.h>
30 #include <hwregs/intr_vect_defs.h>
31 #include <hwregs/intr_vect.h>
32 #include <hwregs/reg_map.h>
33 #include <asm/sync_serial.h>
36 /* The receiver is a bit tricky because of the continuous stream of data.*/
37 /* */
38 /* Three DMA descriptors are linked together. Each DMA descriptor is */
39 /* responsible for port->bufchunk of a common buffer. */
40 /* */
41 /* +---------------------------------------------+ */
42 /* | +----------+ +----------+ +----------+ | */
43 /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
44 /* +----------+ +----------+ +----------+ */
45 /* | | | */
46 /* v v v */
47 /* +-------------------------------------+ */
48 /* | BUFFER | */
49 /* +-------------------------------------+ */
50 /* |<- data_avail ->| */
51 /* readp writep */
52 /* */
53 /* If the application keeps up the pace readp will be right after writep.*/
54 /* If the application can't keep the pace we have to throw away data. */
55 /* The idea is that readp should be ready with the data pointed out by */
56 /* Descr[i] when the DMA has filled in Descr[i+1]. */
57 /* Otherwise we will discard */
58 /* the rest of the data pointed out by Descr[i] and set readp to the start */
59 /* of Descr[i+1]. */
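/* With the sizes defined below (IN_BUFFER_SIZE 12288, IN_DESCR_SIZE 256) */
/* the ring actually holds 48 descriptors; only three are drawn above */
/* to keep the picture small. */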
61 #define SYNC_SERIAL_MAJOR 125
63 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
64 /* words can be handled */
65 #define IN_BUFFER_SIZE 12288
66 #define IN_DESCR_SIZE 256
67 #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
69 #define OUT_BUFFER_SIZE (1024 * 8)
70 #define NBR_OUT_DESCR 8
72 #define DEFAULT_FRAME_RATE 0
73 #define DEFAULT_WORD_RATE 7
75 /* NOTE: Enabling some debug will likely cause overrun or underrun,
76 * especially if manual mode is used. */
78 #define DEBUG(x)
79 #define DEBUGREAD(x)
80 #define DEBUGWRITE(x)
81 #define DEBUGPOLL(x)
82 #define DEBUGRXINT(x)
83 #define DEBUGTXINT(x)
84 #define DEBUGTRDMA(x)
85 #define DEBUGOUTBUF(x)
87 typedef struct sync_port
89 reg_scope_instances regi_sser;
90 reg_scope_instances regi_dmain;
91 reg_scope_instances regi_dmaout;
93 char started; /* 1 if port has been started */
94 char port_nbr; /* Port 0 or 1 */
95 char busy; /* 1 if port is busy */
97 char enabled; /* 1 if port is enabled */
98 char use_dma; /* 1 if port uses dma */
99 char tr_running;
101 char init_irqs;
102 int output;
103 int input;
105 /* Next byte to be read by application */
106 volatile unsigned char *volatile readp;
107 /* Next byte to be written by etrax */
108 volatile unsigned char *volatile writep;
110 unsigned int in_buffer_size;
111 unsigned int inbufchunk;
112 unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
113 unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
114 unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
115 struct dma_descr_data* next_rx_desc;
116 struct dma_descr_data* prev_rx_desc;
118 /* Pointer to the first available descriptor in the ring,
119 * unless active_tr_descr == catch_tr_descr and a dma
120 * transfer is active */
121 struct dma_descr_data *active_tr_descr;
123 /* Pointer to the first allocated descriptor in the ring */
124 struct dma_descr_data *catch_tr_descr;
126 /* Pointer to the descriptor with the current end-of-list */
127 struct dma_descr_data *prev_tr_descr;
128 int full;
130 /* Pointer to the first byte being read by DMA
131 * or current position in out_buffer if not using DMA. */
132 unsigned char *out_rd_ptr;
134 /* Number of bytes currently locked for being read by DMA */
135 int out_buf_count;
137 dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
138 dma_descr_context in_context __attribute__ ((__aligned__(32)));
139 dma_descr_data out_descr[NBR_OUT_DESCR]
140 __attribute__ ((__aligned__(16)));
141 dma_descr_context out_context __attribute__ ((__aligned__(32)));
142 wait_queue_head_t out_wait_q;
143 wait_queue_head_t in_wait_q;
145 spinlock_t lock;
146 } sync_port;
148 static DEFINE_MUTEX(sync_serial_mutex);
149 static int etrax_sync_serial_init(void);
150 static void initialize_port(int portnbr);
151 static inline int sync_data_avail(struct sync_port *port);
153 static int sync_serial_open(struct inode *, struct file*);
154 static int sync_serial_release(struct inode*, struct file*);
155 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
157 static long sync_serial_ioctl(struct file *,
158 unsigned int cmd, unsigned long arg);
159 static ssize_t sync_serial_write(struct file * file, const char * buf,
160 size_t count, loff_t *ppos);
161 static ssize_t sync_serial_read(struct file *file, char *buf,
162 size_t count, loff_t *ppos);
164 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
165 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
166 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
167 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
168 #define SYNC_SER_DMA
169 #endif
171 static void send_word(sync_port* port);
172 static void start_dma_out(struct sync_port *port, const char *data, int count);
173 static void start_dma_in(sync_port* port);
174 #ifdef SYNC_SER_DMA
175 static irqreturn_t tr_interrupt(int irq, void *dev_id);
176 static irqreturn_t rx_interrupt(int irq, void *dev_id);
177 #endif
179 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
180 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
181 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
182 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
183 #define SYNC_SER_MANUAL
184 #endif
185 #ifdef SYNC_SER_MANUAL
186 static irqreturn_t manual_interrupt(int irq, void *dev_id);
187 #endif
189 #ifdef CONFIG_ETRAXFS /* ETRAX FS */
190 #define OUT_DMA_NBR 4
191 #define IN_DMA_NBR 5
192 #define PINMUX_SSER pinmux_sser0
193 #define SYNCSER_INST regi_sser0
194 #define SYNCSER_INTR_VECT SSER0_INTR_VECT
195 #define OUT_DMA_INST regi_dma4
196 #define IN_DMA_INST regi_dma5
197 #define DMA_OUT_INTR_VECT DMA4_INTR_VECT
198 #define DMA_IN_INTR_VECT DMA5_INTR_VECT
199 #define REQ_DMA_SYNCSER dma_sser0
200 #else /* Artpec-3 */
201 #define OUT_DMA_NBR 6
202 #define IN_DMA_NBR 7
203 #define PINMUX_SSER pinmux_sser
204 #define SYNCSER_INST regi_sser
205 #define SYNCSER_INTR_VECT SSER_INTR_VECT
206 #define OUT_DMA_INST regi_dma6
207 #define IN_DMA_INST regi_dma7
208 #define DMA_OUT_INTR_VECT DMA6_INTR_VECT
209 #define DMA_IN_INTR_VECT DMA7_INTR_VECT
210 #define REQ_DMA_SYNCSER dma_sser
211 #endif
213 /* The ports */
214 static struct sync_port ports[]=
217 .regi_sser = SYNCSER_INST,
218 .regi_dmaout = OUT_DMA_INST,
219 .regi_dmain = IN_DMA_INST,
220 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
221 .use_dma = 1,
222 #else
223 .use_dma = 0,
224 #endif
226 #ifdef CONFIG_ETRAXFS
230 .regi_sser = regi_sser1,
231 .regi_dmaout = regi_dma6,
232 .regi_dmain = regi_dma7,
233 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
234 .use_dma = 1,
235 #else
236 .use_dma = 0,
237 #endif
239 #endif
242 #define NBR_PORTS ARRAY_SIZE(ports)
244 static const struct file_operations sync_serial_fops = {
245 .owner = THIS_MODULE,
246 .write = sync_serial_write,
247 .read = sync_serial_read,
248 .poll = sync_serial_poll,
249 .unlocked_ioctl = sync_serial_ioctl,
250 .open = sync_serial_open,
251 .release = sync_serial_release,
252 .llseek = noop_llseek,
255 static int __init etrax_sync_serial_init(void)
257 ports[0].enabled = 0;
258 #ifdef CONFIG_ETRAXFS
259 ports[1].enabled = 0;
260 #endif
261 if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
262 &sync_serial_fops) < 0) {
263 printk(KERN_WARNING
264 "Unable to get major for synchronous serial port\n");
265 return -EBUSY;
268 /* Initialize Ports */
269 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
270 if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
271 printk(KERN_WARNING
272 "Unable to alloc pins for synchronous serial port 0\n");
273 return -EIO;
275 ports[0].enabled = 1;
276 initialize_port(0);
277 #endif
279 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
280 if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
281 printk(KERN_WARNING
282 "Unable to alloc pins for synchronous serial port 0\n");
283 return -EIO;
285 ports[1].enabled = 1;
286 initialize_port(1);
287 #endif
289 #ifdef CONFIG_ETRAXFS
290 printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
291 #else
292 printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
293 #endif
294 return 0;
297 static void __init initialize_port(int portnbr)
299 int __attribute__((unused)) i;
300 struct sync_port *port = &ports[portnbr];
301 reg_sser_rw_cfg cfg = {0};
302 reg_sser_rw_frm_cfg frm_cfg = {0};
303 reg_sser_rw_tr_cfg tr_cfg = {0};
304 reg_sser_rw_rec_cfg rec_cfg = {0};
306 DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
308 port->port_nbr = portnbr;
309 port->init_irqs = 1;
311 port->out_rd_ptr = port->out_buffer;
312 port->out_buf_count = 0;
314 port->output = 1;
315 port->input = 0;
317 port->readp = port->flip;
318 port->writep = port->flip;
319 port->in_buffer_size = IN_BUFFER_SIZE;
320 port->inbufchunk = IN_DESCR_SIZE;
321 port->next_rx_desc = &port->in_descr[0];
322 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
323 port->prev_rx_desc->eol = 1;
325 init_waitqueue_head(&port->out_wait_q);
326 init_waitqueue_head(&port->in_wait_q);
328 spin_lock_init(&port->lock);
330 cfg.out_clk_src = regk_sser_intern_clk;
331 cfg.out_clk_pol = regk_sser_pos;
332 cfg.clk_od_mode = regk_sser_no;
333 cfg.clk_dir = regk_sser_out;
334 cfg.gate_clk = regk_sser_no;
335 cfg.base_freq = regk_sser_f29_493;
336 cfg.clk_div = 256;
337 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
339 frm_cfg.wordrate = DEFAULT_WORD_RATE;
340 frm_cfg.type = regk_sser_edge;
341 frm_cfg.frame_pin_dir = regk_sser_out;
342 frm_cfg.frame_pin_use = regk_sser_frm;
343 frm_cfg.status_pin_dir = regk_sser_in;
344 frm_cfg.status_pin_use = regk_sser_hold;
345 frm_cfg.out_on = regk_sser_tr;
346 frm_cfg.tr_delay = 1;
347 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
349 tr_cfg.urun_stop = regk_sser_no;
350 tr_cfg.sample_size = 7;
351 tr_cfg.sh_dir = regk_sser_msbfirst;
352 tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
353 #if 0
354 tr_cfg.rate_ctrl = regk_sser_bulk;
355 tr_cfg.data_pin_use = regk_sser_dout;
356 #else
357 tr_cfg.rate_ctrl = regk_sser_iso;
358 tr_cfg.data_pin_use = regk_sser_dout;
359 #endif
360 tr_cfg.bulk_wspace = 1;
361 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
363 rec_cfg.sample_size = 7;
364 rec_cfg.sh_dir = regk_sser_msbfirst;
365 rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
366 rec_cfg.fifo_thr = regk_sser_inf;
367 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
369 #ifdef SYNC_SER_DMA
370 /* Setup the descriptor ring for dma out/transmit. */
371 for (i = 0; i < NBR_OUT_DESCR; i++) {
372 port->out_descr[i].wait = 0;
373 port->out_descr[i].intr = 1;
374 port->out_descr[i].eol = 0;
375 port->out_descr[i].out_eop = 0;
376 port->out_descr[i].next =
377 (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
380 /* Create a ring from the list. */
381 port->out_descr[NBR_OUT_DESCR-1].next =
382 (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
384 /* Setup context for traversing the ring. */
385 port->active_tr_descr = &port->out_descr[0];
386 port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
387 port->catch_tr_descr = &port->out_descr[0];
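/* All transmit descriptors start out free: active_tr_descr and
 * catch_tr_descr both point at descriptor 0 while prev_tr_descr points
 * at the last one.  start_dma_out() advances active_tr_descr as data is
 * queued and tr_interrupt() advances catch_tr_descr as the DMA completes
 * descriptors. */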
388 #endif
391 static inline int sync_data_avail(struct sync_port *port)
393 int avail;
394 unsigned char *start;
395 unsigned char *end;
397 start = (unsigned char*)port->readp; /* cast away volatile */
398 end = (unsigned char*)port->writep; /* cast away volatile */
399 /* 0123456789 0123456789
400 * ----- - -----
401 * ^rp ^wp ^wp ^rp */
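/* Example with a 10 byte buffer: readp at offset 7 and writep at
 * offset 2 give avail = 10 - (7 - 2) = 5 bytes (the wrapped case). */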
404 if (end >= start)
405 avail = end - start;
406 else
407 avail = port->in_buffer_size - (start - end);
408 return avail;
411 static inline int sync_data_avail_to_end(struct sync_port *port)
413 int avail;
414 unsigned char *start;
415 unsigned char *end;
417 start = (unsigned char*)port->readp; /* cast away volatile */
418 end = (unsigned char*)port->writep; /* cast away volatile */
419 /* 0123456789 0123456789
420 * ----- -----
421 * ^rp ^wp ^wp ^rp */
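/* Example with a 10 byte buffer: readp at offset 7 and writep at
 * offset 2 give avail = 10 - 7 = 3 contiguous bytes up to the end of
 * the flip buffer. */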
424 if (end >= start)
425 avail = end - start;
426 else
427 avail = port->flip + port->in_buffer_size - start;
428 return avail;
431 static int sync_serial_open(struct inode *inode, struct file *file)
433 int dev = iminor(inode);
434 int ret = -EBUSY;
435 sync_port *port;
436 reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
437 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
439 mutex_lock(&sync_serial_mutex);
440 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
442 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
444 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
445 ret = -ENODEV;
446 goto out;
448 port = &ports[dev];
449 /* Allow opening this device twice (assuming one reader and one writer) */
450 if (port->busy == 2)
452 DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
453 goto out;
457 if (port->init_irqs) {
458 if (port->use_dma) {
459 if (port == &ports[0]) {
460 #ifdef SYNC_SER_DMA
461 if (request_irq(DMA_OUT_INTR_VECT,
462 tr_interrupt,
464 "synchronous serial 0 dma tr",
465 &ports[0])) {
466 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
467 goto out;
468 } else if (request_irq(DMA_IN_INTR_VECT,
469 rx_interrupt,
471 "synchronous serial 1 dma rx",
472 &ports[0])) {
473 free_irq(DMA_OUT_INTR_VECT, &port[0]);
474 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
475 goto out;
476 } else if (crisv32_request_dma(OUT_DMA_NBR,
477 "synchronous serial 0 dma tr",
478 DMA_VERBOSE_ON_ERROR,
480 REQ_DMA_SYNCSER)) {
481 free_irq(DMA_OUT_INTR_VECT, &port[0]);
482 free_irq(DMA_IN_INTR_VECT, &port[0]);
483 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
484 goto out;
485 } else if (crisv32_request_dma(IN_DMA_NBR,
486 "synchronous serial 0 dma rec",
487 DMA_VERBOSE_ON_ERROR,
489 REQ_DMA_SYNCSER)) {
490 crisv32_free_dma(OUT_DMA_NBR);
491 free_irq(DMA_OUT_INTR_VECT, &port[0]);
492 free_irq(DMA_IN_INTR_VECT, &port[0]);
493 printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
494 goto out;
496 #endif
498 #ifdef CONFIG_ETRAXFS
499 else if (port == &ports[1]) {
500 #ifdef SYNC_SER_DMA
501 if (request_irq(DMA6_INTR_VECT,
502 tr_interrupt,
504 "synchronous serial 1 dma tr",
505 &ports[1])) {
506 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
507 goto out;
508 } else if (request_irq(DMA7_INTR_VECT,
509 rx_interrupt,
511 "synchronous serial 1 dma rx",
512 &ports[1])) {
513 free_irq(DMA6_INTR_VECT, &ports[1]);
514 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
515 goto out;
516 } else if (crisv32_request_dma(
517 SYNC_SER1_TX_DMA_NBR,
518 "synchronous serial 1 dma tr",
519 DMA_VERBOSE_ON_ERROR,
521 dma_sser1)) {
522 free_irq(DMA6_INTR_VECT, &ports[1]);
523 free_irq(DMA7_INTR_VECT, &ports[1]);
524 printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
525 goto out;
526 } else if (crisv32_request_dma(
527 SYNC_SER1_RX_DMA_NBR,
528 "synchronous serial 3 dma rec",
529 DMA_VERBOSE_ON_ERROR,
531 dma_sser1)) {
532 crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
533 free_irq(DMA6_INTR_VECT, &ports[1]);
534 free_irq(DMA7_INTR_VECT, &ports[1]);
535 printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
536 goto out;
538 #endif
540 #endif
541 /* Enable DMAs */
542 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
543 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
544 /* Enable DMA IRQs */
545 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
546 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
547 /* Set up wordsize = 1 for DMAs. */
548 DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
549 DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
551 start_dma_in(port);
552 port->init_irqs = 0;
553 } else { /* !port->use_dma */
554 #ifdef SYNC_SER_MANUAL
555 if (port == &ports[0]) {
556 if (request_irq(SYNCSER_INTR_VECT,
557 manual_interrupt,
559 "synchronous serial manual irq",
560 &ports[0])) {
561 printk(KERN_CRIT "Can't allocate sync serial manual irq");
562 goto out;
565 #ifdef CONFIG_ETRAXFS
566 else if (port == &ports[1]) {
567 if (request_irq(SSER1_INTR_VECT,
568 manual_interrupt,
570 "synchronous serial manual irq",
571 &ports[1])) {
572 printk(KERN_CRIT "Can't allocate sync serial manual irq");
573 goto out;
576 #endif
577 port->init_irqs = 0;
578 #else
579 panic("sync_serial: Manual mode not supported.\n");
580 #endif /* SYNC_SER_MANUAL */
583 } /* port->init_irqs */
585 port->busy++;
586 ret = 0;
587 out:
588 mutex_unlock(&sync_serial_mutex);
589 return ret;
592 static int sync_serial_release(struct inode *inode, struct file *file)
594 int dev = iminor(inode);
595 sync_port *port;
597 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
599 DEBUG(printk("Invalid minor %d\n", dev));
600 return -ENODEV;
602 port = &ports[dev];
603 if (port->busy)
604 port->busy--;
605 if (!port->busy)
606 /* XXX */ ;
607 return 0;
610 static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
612 int dev = iminor(file->f_path.dentry->d_inode);
613 unsigned int mask = 0;
614 sync_port *port;
615 DEBUGPOLL( static unsigned int prev_mask = 0; );
617 port = &ports[dev];
619 if (!port->started) {
620 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
621 reg_sser_rw_rec_cfg rec_cfg =
622 REG_RD(sser, port->regi_sser, rw_rec_cfg);
623 cfg.en = regk_sser_yes;
624 rec_cfg.rec_en = port->input;
625 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
626 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
627 port->started = 1;
630 poll_wait(file, &port->out_wait_q, wait);
631 poll_wait(file, &port->in_wait_q, wait);
633 /* No active transfer, descriptors are available */
634 if (port->output && !port->tr_running)
635 mask |= POLLOUT | POLLWRNORM;
637 /* Descriptor and buffer space available. */
638 if (port->output &&
639 port->active_tr_descr != port->catch_tr_descr &&
640 port->out_buf_count < OUT_BUFFER_SIZE)
641 mask |= POLLOUT | POLLWRNORM;
643 /* At least an inbufchunk of data */
644 if (port->input && sync_data_avail(port) >= port->inbufchunk)
645 mask |= POLLIN | POLLRDNORM;
647 DEBUGPOLL(if (mask != prev_mask)
648 printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
649 mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
650 prev_mask = mask;
652 return mask;
655 static int sync_serial_ioctl_unlocked(struct file *file,
656 unsigned int cmd, unsigned long arg)
658 int return_val = 0;
659 int dma_w_size = regk_dma_set_w_size1;
660 int dev = iminor(file->f_path.dentry->d_inode);
661 sync_port *port;
662 reg_sser_rw_tr_cfg tr_cfg;
663 reg_sser_rw_rec_cfg rec_cfg;
664 reg_sser_rw_frm_cfg frm_cfg;
665 reg_sser_rw_cfg gen_cfg;
666 reg_sser_rw_intr_mask intr_mask;
668 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
670 DEBUG(printk("Invalid minor %d\n", dev));
671 return -1;
673 port = &ports[dev];
674 spin_lock_irq(&port->lock);
676 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
677 rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
678 frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
679 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
680 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
682 switch(cmd)
684 case SSP_SPEED:
685 if (GET_SPEED(arg) == CODEC)
687 unsigned int freq;
689 gen_cfg.base_freq = regk_sser_f32;
691 /* Clock divider will internally be
692 * gen_cfg.clk_div + 1. */
695 freq = GET_FREQ(arg);
696 switch (freq) {
697 case FREQ_32kHz:
698 case FREQ_64kHz:
699 case FREQ_128kHz:
700 case FREQ_256kHz:
701 gen_cfg.clk_div = 125 *
702 (1 << (freq - FREQ_256kHz)) - 1;
703 break;
704 case FREQ_512kHz:
705 gen_cfg.clk_div = 62;
706 break;
707 case FREQ_1MHz:
708 case FREQ_2MHz:
709 case FREQ_4MHz:
710 gen_cfg.clk_div = 8 * (1 << freq) - 1;
711 break;
713 } else {
714 gen_cfg.base_freq = regk_sser_f29_493;
715 switch (GET_SPEED(arg)) {
716 case SSP150:
717 gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
718 break;
719 case SSP300:
720 gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
721 break;
722 case SSP600:
723 gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
724 break;
725 case SSP1200:
726 gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
727 break;
728 case SSP2400:
729 gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
730 break;
731 case SSP4800:
732 gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
733 break;
734 case SSP9600:
735 gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
736 break;
737 case SSP19200:
738 gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
739 break;
740 case SSP28800:
741 gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
742 break;
743 case SSP57600:
744 gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
745 break;
746 case SSP115200:
747 gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
748 break;
749 case SSP230400:
750 gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
751 break;
752 case SSP460800:
753 gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
754 break;
755 case SSP921600:
756 gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
757 break;
758 case SSP3125000:
759 gen_cfg.base_freq = regk_sser_f100;
760 gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
761 break;
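/* The 29.493 MHz divisors above all follow
 * clk_div = 29493000 / (bitrate * 8) - 1;
 * e.g. SSP115200 gives 29493000 / (115200 * 8) - 1 = 31. */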
765 frm_cfg.wordrate = GET_WORD_RATE(arg);
767 break;
768 case SSP_MODE:
769 switch(arg)
771 case MASTER_OUTPUT:
772 port->output = 1;
773 port->input = 0;
774 frm_cfg.out_on = regk_sser_tr;
775 frm_cfg.frame_pin_dir = regk_sser_out;
776 gen_cfg.clk_dir = regk_sser_out;
777 break;
778 case SLAVE_OUTPUT:
779 port->output = 1;
780 port->input = 0;
781 frm_cfg.frame_pin_dir = regk_sser_in;
782 gen_cfg.clk_dir = regk_sser_in;
783 break;
784 case MASTER_INPUT:
785 port->output = 0;
786 port->input = 1;
787 frm_cfg.frame_pin_dir = regk_sser_out;
788 frm_cfg.out_on = regk_sser_intern_tb;
789 gen_cfg.clk_dir = regk_sser_out;
790 break;
791 case SLAVE_INPUT:
792 port->output = 0;
793 port->input = 1;
794 frm_cfg.frame_pin_dir = regk_sser_in;
795 gen_cfg.clk_dir = regk_sser_in;
796 break;
797 case MASTER_BIDIR:
798 port->output = 1;
799 port->input = 1;
800 frm_cfg.frame_pin_dir = regk_sser_out;
801 frm_cfg.out_on = regk_sser_intern_tb;
802 gen_cfg.clk_dir = regk_sser_out;
803 break;
804 case SLAVE_BIDIR:
805 port->output = 1;
806 port->input = 1;
807 frm_cfg.frame_pin_dir = regk_sser_in;
808 gen_cfg.clk_dir = regk_sser_in;
809 break;
810 default:
811 spin_unlock_irq(&port->lock);
812 return -EINVAL;
814 if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
815 intr_mask.rdav = regk_sser_yes;
816 break;
817 case SSP_FRAME_SYNC:
818 if (arg & NORMAL_SYNC) {
819 frm_cfg.rec_delay = 1;
820 frm_cfg.tr_delay = 1;
822 else if (arg & EARLY_SYNC)
823 frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
824 else if (arg & SECOND_WORD_SYNC) {
825 frm_cfg.rec_delay = 7;
826 frm_cfg.tr_delay = 1;
829 tr_cfg.bulk_wspace = frm_cfg.tr_delay;
830 frm_cfg.early_wend = regk_sser_yes;
831 if (arg & BIT_SYNC)
832 frm_cfg.type = regk_sser_edge;
833 else if (arg & WORD_SYNC)
834 frm_cfg.type = regk_sser_level;
835 else if (arg & EXTENDED_SYNC)
836 frm_cfg.early_wend = regk_sser_no;
838 if (arg & SYNC_ON)
839 frm_cfg.frame_pin_use = regk_sser_frm;
840 else if (arg & SYNC_OFF)
841 frm_cfg.frame_pin_use = regk_sser_gio0;
843 dma_w_size = regk_dma_set_w_size2;
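/* Default to regk_dma_set_w_size2 (presumably 16 bit DMA words); only
 * the 8 bit word size below falls back to regk_dma_set_w_size1. */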
844 if (arg & WORD_SIZE_8) {
845 rec_cfg.sample_size = tr_cfg.sample_size = 7;
846 dma_w_size = regk_dma_set_w_size1;
847 } else if (arg & WORD_SIZE_12)
848 rec_cfg.sample_size = tr_cfg.sample_size = 11;
849 else if (arg & WORD_SIZE_16)
850 rec_cfg.sample_size = tr_cfg.sample_size = 15;
851 else if (arg & WORD_SIZE_24)
852 rec_cfg.sample_size = tr_cfg.sample_size = 23;
853 else if (arg & WORD_SIZE_32)
854 rec_cfg.sample_size = tr_cfg.sample_size = 31;
856 if (arg & BIT_ORDER_MSB)
857 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
858 else if (arg & BIT_ORDER_LSB)
859 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
861 if (arg & FLOW_CONTROL_ENABLE) {
862 frm_cfg.status_pin_use = regk_sser_frm;
863 rec_cfg.fifo_thr = regk_sser_thr16;
864 } else if (arg & FLOW_CONTROL_DISABLE) {
865 frm_cfg.status_pin_use = regk_sser_gio0;
866 rec_cfg.fifo_thr = regk_sser_inf;
869 if (arg & CLOCK_NOT_GATED)
870 gen_cfg.gate_clk = regk_sser_no;
871 else if (arg & CLOCK_GATED)
872 gen_cfg.gate_clk = regk_sser_yes;
874 break;
875 case SSP_IPOLARITY:
876 /* NOTE!! negedge is considered NORMAL */
877 if (arg & CLOCK_NORMAL)
878 rec_cfg.clk_pol = regk_sser_neg;
879 else if (arg & CLOCK_INVERT)
880 rec_cfg.clk_pol = regk_sser_pos;
882 if (arg & FRAME_NORMAL)
883 frm_cfg.level = regk_sser_pos_hi;
884 else if (arg & FRAME_INVERT)
885 frm_cfg.level = regk_sser_neg_lo;
887 if (arg & STATUS_NORMAL)
888 gen_cfg.hold_pol = regk_sser_pos;
889 else if (arg & STATUS_INVERT)
890 gen_cfg.hold_pol = regk_sser_neg;
891 break;
892 case SSP_OPOLARITY:
893 if (arg & CLOCK_NORMAL)
894 gen_cfg.out_clk_pol = regk_sser_pos;
895 else if (arg & CLOCK_INVERT)
896 gen_cfg.out_clk_pol = regk_sser_neg;
898 if (arg & FRAME_NORMAL)
899 frm_cfg.level = regk_sser_pos_hi;
900 else if (arg & FRAME_INVERT)
901 frm_cfg.level = regk_sser_neg_lo;
903 if (arg & STATUS_NORMAL)
904 gen_cfg.hold_pol = regk_sser_pos;
905 else if (arg & STATUS_INVERT)
906 gen_cfg.hold_pol = regk_sser_neg;
907 break;
908 case SSP_SPI:
909 rec_cfg.fifo_thr = regk_sser_inf;
910 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
911 rec_cfg.sample_size = tr_cfg.sample_size = 7;
912 frm_cfg.frame_pin_use = regk_sser_frm;
913 frm_cfg.type = regk_sser_level;
914 frm_cfg.tr_delay = 1;
915 frm_cfg.level = regk_sser_neg_lo;
916 if (arg & SPI_SLAVE)
918 rec_cfg.clk_pol = regk_sser_neg;
919 gen_cfg.clk_dir = regk_sser_in;
920 port->input = 1;
921 port->output = 0;
923 else
925 gen_cfg.out_clk_pol = regk_sser_pos;
926 port->input = 0;
927 port->output = 1;
928 gen_cfg.clk_dir = regk_sser_out;
930 break;
931 case SSP_INBUFCHUNK:
932 break;
933 default:
934 return_val = -1;
938 if (port->started) {
939 rec_cfg.rec_en = port->input;
940 gen_cfg.en = (port->output | port->input);
943 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
944 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
945 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
946 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
947 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
950 if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
951 WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
952 int en = gen_cfg.en;
953 gen_cfg.en = 0;
954 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
955 /* ##### Should DMA be stopped before we change dma size? */
956 DMA_WR_CMD(port->regi_dmain, dma_w_size);
957 DMA_WR_CMD(port->regi_dmaout, dma_w_size);
958 gen_cfg.en = en;
959 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
962 spin_unlock_irq(&port->lock);
963 return return_val;
966 static long sync_serial_ioctl(struct file *file,
967 unsigned int cmd, unsigned long arg)
969 long ret;
971 mutex_lock(&sync_serial_mutex);
972 ret = sync_serial_ioctl_unlocked(file, cmd, arg);
973 mutex_unlock(&sync_serial_mutex);
975 return ret;
978 /* NOTE: sync_serial_write does not support concurrency */
979 static ssize_t sync_serial_write(struct file *file, const char *buf,
980 size_t count, loff_t *ppos)
982 int dev = iminor(file->f_path.dentry->d_inode);
983 DECLARE_WAITQUEUE(wait, current);
984 struct sync_port *port;
985 int trunc_count;
986 unsigned long flags;
987 int bytes_free;
988 int out_buf_count;
990 unsigned char *rd_ptr; /* First allocated byte in the buffer */
991 unsigned char *wr_ptr; /* First free byte in the buffer */
992 unsigned char *buf_stop_ptr; /* Last byte + 1 */
994 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
995 DEBUG(printk("Invalid minor %d\n", dev));
996 return -ENODEV;
998 port = &ports[dev];
1000 /* |<- OUT_BUFFER_SIZE ->|
1001 * |<- out_buf_count ->|
1002 * |<- trunc_count ->| ...->|
1003 * ______________________________________________________
1004 * | free | data | free |
1005 * |_________|___________________|________________________|
1006 * ^ rd_ptr ^ wr_ptr */
1008 DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
1009 port->port_nbr, count, port->active_tr_descr,
1010 port->catch_tr_descr));
1012 /* Read variables that may be updated by interrupts */
1013 spin_lock_irqsave(&port->lock, flags);
1014 rd_ptr = port->out_rd_ptr;
1015 out_buf_count = port->out_buf_count;
1016 spin_unlock_irqrestore(&port->lock, flags);
1018 /* Check if resources are available */
1019 if (port->tr_running &&
1020 ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
1021 out_buf_count >= OUT_BUFFER_SIZE)) {
1022 DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
1023 return -EAGAIN;
1026 buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
1028 /* Determine pointer to the first free byte, before copying. */
1029 wr_ptr = rd_ptr + out_buf_count;
1030 if (wr_ptr >= buf_stop_ptr)
1031 wr_ptr -= OUT_BUFFER_SIZE;
1033 /* If we wrap the ring buffer, let the user space program handle it by
1034 * truncating the data. This could be more elegant, small buffer
1035 * fragments may occur. */
1037 bytes_free = OUT_BUFFER_SIZE - out_buf_count;
1038 if (wr_ptr + bytes_free > buf_stop_ptr)
1039 bytes_free = buf_stop_ptr - wr_ptr;
1040 trunc_count = (count < bytes_free) ? count : bytes_free;
1042 if (copy_from_user(wr_ptr, buf, trunc_count))
1043 return -EFAULT;
1045 DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
1046 out_buf_count, trunc_count,
1047 port->out_buf_count, port->out_buffer,
1048 wr_ptr, buf_stop_ptr));
1050 /* Make sure transmitter/receiver is running */
1051 if (!port->started) {
1052 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1053 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1054 cfg.en = regk_sser_yes;
1055 rec_cfg.rec_en = port->input;
1056 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1057 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1058 port->started = 1;
1061 /* Setup wait if blocking */
1062 if (!(file->f_flags & O_NONBLOCK)) {
1063 add_wait_queue(&port->out_wait_q, &wait);
1064 set_current_state(TASK_INTERRUPTIBLE);
1067 spin_lock_irqsave(&port->lock, flags);
1068 port->out_buf_count += trunc_count;
1069 if (port->use_dma) {
1070 start_dma_out(port, wr_ptr, trunc_count);
1071 } else if (!port->tr_running) {
1072 reg_sser_rw_intr_mask intr_mask;
1073 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1074 /* Start sender by writing data */
1075 send_word(port);
1076 /* and enable transmitter ready IRQ */
1077 intr_mask.trdy = 1;
1078 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1080 spin_unlock_irqrestore(&port->lock, flags);
1082 /* Exit if non blocking */
1083 if (file->f_flags & O_NONBLOCK) {
1084 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
1085 port->port_nbr, trunc_count,
1086 REG_RD_INT(dma, port->regi_dmaout, r_intr)));
1087 return trunc_count;
1090 schedule();
1091 set_current_state(TASK_RUNNING);
1092 remove_wait_queue(&port->out_wait_q, &wait);
1094 if (signal_pending(current))
1095 return -EINTR;
1097 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
1098 port->port_nbr, trunc_count));
1099 return trunc_count;
1102 static ssize_t sync_serial_read(struct file * file, char * buf,
1103 size_t count, loff_t *ppos)
1105 int dev = iminor(file->f_path.dentry->d_inode);
1106 int avail;
1107 sync_port *port;
1108 unsigned char* start;
1109 unsigned char* end;
1110 unsigned long flags;
1112 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
1114 DEBUG(printk("Invalid minor %d\n", dev));
1115 return -ENODEV;
1117 port = &ports[dev];
1119 DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
1121 if (!port->started)
1123 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1124 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1125 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1126 cfg.en = regk_sser_yes;
1127 tr_cfg.tr_en = regk_sser_yes;
1128 rec_cfg.rec_en = regk_sser_yes;
1129 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1130 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1131 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1132 port->started = 1;
1135 /* Calculate number of available bytes */
1136 /* Save pointers so they are not modified by an interrupt */
1137 spin_lock_irqsave(&port->lock, flags);
1138 start = (unsigned char*)port->readp; /* cast away volatile */
1139 end = (unsigned char*)port->writep; /* cast away volatile */
1140 spin_unlock_irqrestore(&port->lock, flags);
1141 while ((start == end) && !port->full) /* No data */
1143 DEBUGREAD(printk(KERN_DEBUG "&"));
1144 if (file->f_flags & O_NONBLOCK)
1145 return -EAGAIN;
1147 interruptible_sleep_on(&port->in_wait_q);
1148 if (signal_pending(current))
1149 return -EINTR;
1151 spin_lock_irqsave(&port->lock, flags);
1152 start = (unsigned char*)port->readp; /* cast away volatile */
1153 end = (unsigned char*)port->writep; /* cast away volatile */
1154 spin_unlock_irqrestore(&port->lock, flags);
1157 /* Lazy read, never return wrapped data. */
1158 if (port->full)
1159 avail = port->in_buffer_size;
1160 else if (end > start)
1161 avail = end - start;
1162 else
1163 avail = port->flip + port->in_buffer_size - start;
1165 count = count > avail ? avail : count;
1166 if (copy_to_user(buf, start, count))
1167 return -EFAULT;
1168 /* Disable interrupts while updating readp */
1169 spin_lock_irqsave(&port->lock, flags);
1170 port->readp += count;
1171 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1172 port->readp = port->flip;
1173 port->full = 0;
1174 spin_unlock_irqrestore(&port->lock, flags);
1175 DEBUGREAD(printk("r %d\n", count));
1176 return count;
1179 static void send_word(sync_port* port)
1181 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1182 reg_sser_rw_tr_data tr_data = {0};
1184 switch(tr_cfg.sample_size)
1186 case 8:
1187 port->out_buf_count--;
1188 tr_data.data = *port->out_rd_ptr++;
1189 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1190 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1191 port->out_rd_ptr = port->out_buffer;
1192 break;
1193 case 12:
1195 int data = (*port->out_rd_ptr++) << 8;
1196 data |= *port->out_rd_ptr++;
1197 port->out_buf_count -= 2;
1198 tr_data.data = data;
1199 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1200 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1201 port->out_rd_ptr = port->out_buffer;
1203 break;
1204 case 16:
1205 port->out_buf_count -= 2;
1206 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1207 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1208 port->out_rd_ptr += 2;
1209 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1210 port->out_rd_ptr = port->out_buffer;
1211 break;
1212 case 24:
1213 port->out_buf_count -= 3;
1214 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1215 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1216 port->out_rd_ptr += 2;
1217 tr_data.data = *port->out_rd_ptr++;
1218 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1219 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1220 port->out_rd_ptr = port->out_buffer;
1221 break;
1222 case 32:
1223 port->out_buf_count -= 4;
1224 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1225 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1226 port->out_rd_ptr += 2;
1227 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1228 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1229 port->out_rd_ptr += 2;
1230 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1231 port->out_rd_ptr = port->out_buffer;
1232 break;
1236 static void start_dma_out(struct sync_port *port,
1237 const char *data, int count)
1239 port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
1240 port->active_tr_descr->after = port->active_tr_descr->buf + count;
1241 port->active_tr_descr->intr = 1;
1243 port->active_tr_descr->eol = 1;
1244 port->prev_tr_descr->eol = 0;
1246 DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
1247 port->prev_tr_descr, port->active_tr_descr));
1248 port->prev_tr_descr = port->active_tr_descr;
1249 port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
1251 if (!port->tr_running) {
1252 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
1253 rw_tr_cfg);
1255 port->out_context.next = 0;
1256 port->out_context.saved_data =
1257 (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
1258 port->out_context.saved_data_buf = port->prev_tr_descr->buf;
1260 DMA_START_CONTEXT(port->regi_dmaout,
1261 virt_to_phys((char *)&port->out_context));
1263 tr_cfg.tr_en = regk_sser_yes;
1264 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1265 DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
1266 } else {
1267 DMA_CONTINUE_DATA(port->regi_dmaout);
1268 DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
1271 port->tr_running = 1;
1274 static void start_dma_in(sync_port *port)
1276 int i;
1277 char *buf;
1278 port->writep = port->flip;
1280 if (port->writep > port->flip + port->in_buffer_size) {
1281 panic("Offset too large in sync serial driver\n");
1282 return;
1284 buf = (char*)virt_to_phys(port->in_buffer);
1285 for (i = 0; i < NBR_IN_DESCR; i++) {
1286 port->in_descr[i].buf = buf;
1287 port->in_descr[i].after = buf + port->inbufchunk;
1288 port->in_descr[i].intr = 1;
1289 port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
1291 buf += port->inbufchunk;
1293 /* Link the last descriptor to the first */
1294 port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1295 port->in_descr[i-1].eol = regk_sser_yes;
1296 port->next_rx_desc = &port->in_descr[0];
1297 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
1298 port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1299 port->in_context.saved_data_buf = port->in_descr[0].buf;
1300 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
1303 #ifdef SYNC_SER_DMA
1304 static irqreturn_t tr_interrupt(int irq, void *dev_id)
1306 reg_dma_r_masked_intr masked;
1307 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1308 reg_dma_rw_stat stat;
1309 int i;
1310 int found = 0;
1311 int stop_sser = 0;
1313 for (i = 0; i < NBR_PORTS; i++) {
1314 sync_port *port = &ports[i];
1315 if (!port->enabled || !port->use_dma)
1316 continue;
1318 /* IRQ active for the port? */
1319 masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
1320 if (!masked.data)
1321 continue;
1323 found = 1;
1325 /* Check if we should stop the DMA transfer */
1326 stat = REG_RD(dma, port->regi_dmaout, rw_stat);
1327 if (stat.list_state == regk_dma_data_at_eol)
1328 stop_sser = 1;
1330 /* Clear IRQ */
1331 REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
1333 if (!stop_sser) {
1334 /* The DMA has completed a descriptor, EOL was not
1335 * encountered, so step relevant descriptor and
1336 * datapointers forward. */
1337 int sent;
1338 sent = port->catch_tr_descr->after -
1339 port->catch_tr_descr->buf;
1340 DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
1341 "in descr %p (ac: %p)\n",
1342 port->out_buf_count, sent,
1343 port->out_buf_count - sent,
1344 port->catch_tr_descr,
1345 port->active_tr_descr););
1346 port->out_buf_count -= sent;
1347 port->catch_tr_descr =
1348 phys_to_virt((int) port->catch_tr_descr->next);
1349 port->out_rd_ptr =
1350 phys_to_virt((int) port->catch_tr_descr->buf);
1351 } else {
1352 int i, sent;
1353 /* EOL handler.
1354 * Note that if an EOL was encountered during the irq
1355 * locked section of sync_ser_write the DMA will be
1356 * restarted and the eol flag will be cleared.
1357 * The remaining descriptors will be traversed by
1358 * the descriptor interrupts as usual. */
1360 i = 0;
1361 while (!port->catch_tr_descr->eol) {
1362 sent = port->catch_tr_descr->after -
1363 port->catch_tr_descr->buf;
1364 DEBUGOUTBUF(printk(KERN_DEBUG
1365 "traversing descr %p -%d (%d)\n",
1366 port->catch_tr_descr,
1367 sent,
1368 port->out_buf_count));
1369 port->out_buf_count -= sent;
1370 port->catch_tr_descr = phys_to_virt(
1371 (int)port->catch_tr_descr->next);
1372 i++;
1373 if (i >= NBR_OUT_DESCR) {
1374 /* TODO: Reset and recover */
1375 panic("sync_serial: missing eol");
1378 sent = port->catch_tr_descr->after -
1379 port->catch_tr_descr->buf;
1380 DEBUGOUTBUF(printk(KERN_DEBUG
1381 "eol at descr %p -%d (%d)\n",
1382 port->catch_tr_descr,
1383 sent,
1384 port->out_buf_count));
1386 port->out_buf_count -= sent;
1388 /* Update read pointer to first free byte, we
1389 * may already be writing data there. */
1390 port->out_rd_ptr =
1391 phys_to_virt((int) port->catch_tr_descr->after);
1392 if (port->out_rd_ptr > port->out_buffer +
1393 OUT_BUFFER_SIZE)
1394 port->out_rd_ptr = port->out_buffer;
1396 reg_sser_rw_tr_cfg tr_cfg =
1397 REG_RD(sser, port->regi_sser, rw_tr_cfg);
1398 DEBUGTXINT(printk(KERN_DEBUG
1399 "tr_int DMA stop %d, set catch @ %p\n",
1400 port->out_buf_count,
1401 port->active_tr_descr));
1402 if (port->out_buf_count != 0)
1403 printk(KERN_CRIT "sync_ser: buffer not "
1404 "empty after eol.\n");
1405 port->catch_tr_descr = port->active_tr_descr;
1406 port->tr_running = 0;
1407 tr_cfg.tr_en = regk_sser_no;
1408 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1410 /* wake up the waiting process */
1411 wake_up_interruptible(&port->out_wait_q);
1413 return IRQ_RETVAL(found);
1414 } /* tr_interrupt */
1416 static irqreturn_t rx_interrupt(int irq, void *dev_id)
1418 reg_dma_r_masked_intr masked;
1419 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1421 int i;
1422 int found = 0;
1424 for (i = 0; i < NBR_PORTS; i++)
1426 sync_port *port = &ports[i];
1428 if (!port->enabled || !port->use_dma )
1429 continue;
1431 masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
1433 if (masked.data) /* Descriptor interrupt */
1435 found = 1;
1436 while (REG_RD(dma, port->regi_dmain, rw_data) !=
1437 virt_to_phys(port->next_rx_desc)) {
1438 DEBUGRXINT(printk(KERN_DEBUG "!"));
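/* Copy the chunk just completed by the DMA into the flip buffer,
 * splitting the copy in two when it would run past the end of
 * the buffer. */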
1439 if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
1440 int first_size = port->flip + port->in_buffer_size - port->writep;
1441 memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
1442 memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
1443 port->writep = port->flip + port->inbufchunk - first_size;
1444 } else {
1445 memcpy((char*)port->writep,
1446 phys_to_virt((unsigned)port->next_rx_desc->buf),
1447 port->inbufchunk);
1448 port->writep += port->inbufchunk;
1449 if (port->writep >= port->flip + port->in_buffer_size)
1450 port->writep = port->flip;
1452 if (port->writep == port->readp)
1454 port->full = 1;
1457 port->next_rx_desc->eol = 1;
1458 port->prev_rx_desc->eol = 0;
1459 /* Cache bug workaround */
1460 flush_dma_descr(port->prev_rx_desc, 0);
1461 port->prev_rx_desc = port->next_rx_desc;
1462 port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1463 /* Cache bug workaround */
1464 flush_dma_descr(port->prev_rx_desc, 1);
1465 /* wake up the waiting process */
1466 wake_up_interruptible(&port->in_wait_q);
1467 DMA_CONTINUE(port->regi_dmain);
1468 REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1473 return IRQ_RETVAL(found);
1474 } /* rx_interrupt */
1475 #endif /* SYNC_SER_DMA */
1477 #ifdef SYNC_SER_MANUAL
1478 static irqreturn_t manual_interrupt(int irq, void *dev_id)
1480 int i;
1481 int found = 0;
1482 reg_sser_r_masked_intr masked;
1484 for (i = 0; i < NBR_PORTS; i++)
1486 sync_port *port = &ports[i];
1488 if (!port->enabled || port->use_dma)
1490 continue;
1493 masked = REG_RD(sser, port->regi_sser, r_masked_intr);
1494 if (masked.rdav) /* Data received? */
1496 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1497 reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
1498 found = 1;
1499 /* Read data */
1500 switch(rec_cfg.sample_size)
1502 case 8:
1503 *port->writep++ = data.data & 0xff;
1504 break;
1505 case 12:
1506 *port->writep = (data.data & 0x0ff0) >> 4;
1507 *(port->writep + 1) = data.data & 0x0f;
1508 port->writep+=2;
1509 break;
1510 case 16:
1511 *(unsigned short*)port->writep = data.data;
1512 port->writep+=2;
1513 break;
1514 case 24:
1515 *(unsigned int*)port->writep = data.data;
1516 port->writep+=3;
1517 break;
1518 case 32:
1519 *(unsigned int*)port->writep = data.data;
1520 port->writep+=4;
1521 break;
1524 if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
1525 port->writep = port->flip;
1526 if (port->writep == port->readp) {
1527 /* receive buffer overrun, discard oldest data */
1529 port->readp++;
1530 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1531 port->readp = port->flip;
1533 if (sync_data_avail(port) >= port->inbufchunk)
1534 wake_up_interruptible(&port->in_wait_q); /* Wake up application */
1537 if (masked.trdy) /* Transmitter ready? */
1539 found = 1;
1540 if (port->out_buf_count > 0) /* More data to send */
1541 send_word(port);
1542 else /* transmission finished */
1544 reg_sser_rw_intr_mask intr_mask;
1545 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1546 intr_mask.trdy = 0;
1547 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1548 wake_up_interruptible(&port->out_wait_q); /* Wake up application */
1552 return IRQ_RETVAL(found);
1554 #endif
1556 module_init(etrax_sync_serial_init);