/*
 * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
 *
 * Copyright (c) 2005 Axis Communications AB
 *
 * Author: Mikael Starvik
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/spinlock.h>

#include <hwregs/reg_rdwr.h>
#include <hwregs/sser_defs.h>
#include <hwregs/dma_defs.h>
#include <hwregs/dma.h>
#include <hwregs/intr_vect_defs.h>
#include <hwregs/intr_vect.h>
#include <hwregs/reg_map.h>
#include <asm/sync_serial.h>
/* The receiver is a bit tricky because of the continuous stream of data. */
/*                                                                        */
/* Three DMA descriptors are linked together. Each DMA descriptor is      */
/* responsible for port->inbufchunk of a common buffer.                   */
/*                                                                        */
/*   +---------------------------------------------+                      */
/*   |  +----------+   +----------+   +----------+ |                      */
/*   +->| Descr[0] |-->| Descr[1] |-->| Descr[2] |-+                      */
/*      +----------+   +----------+   +----------+                        */
/*                                                                        */
/*      +-------------------------------------+                           */
/*      +-------------------------------------+                           */
/*          |<-  data_avail  ->|                                          */
/*                                                                        */
/* If the application keeps up the pace, readp will be right after writep.*/
/* If the application can't keep the pace we have to throw away data.     */
/* The idea is that readp should be ready with the data pointed out by    */
/* Descr[i] when the DMA has filled in Descr[i+1].                        */
/* Otherwise we will discard the rest of the data pointed out by Descr[1] */
/* and set readp to the start of the buffer pointed out by Descr[2].      */
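
/* Illustrative numbers (not taken from the hardware): with in_buffer_size */
/* = 12288, readp at offset 100 and writep at offset 612 there are         */
/* 612 - 100 = 512 bytes (two 256-byte chunks) available.  If writep has   */
/* wrapped, e.g. readp at offset 12200 and writep at offset 56, the count  */
/* is 12288 - (12200 - 56) = 144 bytes; see sync_data_avail() below.       */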

#define SYNC_SERIAL_MAJOR 125

/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
/* words can be handled */
#define IN_BUFFER_SIZE 12288
#define IN_DESCR_SIZE 256
#define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
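/* With the values above this is 12288/256 = 48 descriptors, each covering
 * one 256-byte chunk of the common in_buffer; 12288 = 6 * 2048, so a whole
 * number of 24-bit (3-byte) words always fits, even when the DMA moves the
 * data 16 bits at a time. */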

#define OUT_BUFFER_SIZE (1024*8)
#define NBR_OUT_DESCR 8

#define DEFAULT_FRAME_RATE 0
#define DEFAULT_WORD_RATE 7

/* NOTE: Enabling some debug will likely cause overrun or underrun,
 * especially if manual mode is used.
 */
#define DEBUG(x)
#define DEBUGREAD(x)
#define DEBUGWRITE(x)
#define DEBUGPOLL(x)
#define DEBUGRXINT(x)
#define DEBUGTXINT(x)
#define DEBUGTRDMA(x)
#define DEBUGOUTBUF(x)

typedef struct sync_port
{
	reg_scope_instances regi_sser;
	reg_scope_instances regi_dmain;
	reg_scope_instances regi_dmaout;

	char started;	/* 1 if port has been started */
	char port_nbr;	/* Port 0 or 1 */
	char busy;	/* 1 if port is busy */

	char enabled;	/* 1 if port is enabled */
	char use_dma;	/* 1 if port uses dma */

	/* Next byte to be read by application */
	volatile unsigned char *volatile readp;
	/* Next byte to be written by etrax */
	volatile unsigned char *volatile writep;

	unsigned int in_buffer_size;
	unsigned int inbufchunk;
	unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
	unsigned char in_buffer[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
	unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
	struct dma_descr_data *next_rx_desc;
	struct dma_descr_data *prev_rx_desc;

	/* Pointer to the first available descriptor in the ring,
	 * unless active_tr_descr == catch_tr_descr and a dma
	 * transfer is active */
	struct dma_descr_data *active_tr_descr;

	/* Pointer to the first allocated descriptor in the ring */
	struct dma_descr_data *catch_tr_descr;

	/* Pointer to the descriptor with the current end-of-list */
	struct dma_descr_data *prev_tr_descr;

	/* Pointer to the first byte being read by DMA
	 * or current position in out_buffer if not using DMA. */
	unsigned char *out_rd_ptr;

	/* Number of bytes currently locked for being read by DMA */

	dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
	dma_descr_context in_context __attribute__ ((__aligned__(32)));
	dma_descr_data out_descr[NBR_OUT_DESCR]
		__attribute__ ((__aligned__(16)));
	dma_descr_context out_context __attribute__ ((__aligned__(32)));
	wait_queue_head_t out_wait_q;
	wait_queue_head_t in_wait_q;
} sync_port;

static int etrax_sync_serial_init(void);
static void initialize_port(int portnbr);
static inline int sync_data_avail(struct sync_port *port);

static int sync_serial_open(struct inode *, struct file *);
static int sync_serial_release(struct inode *, struct file *);
static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);

static int sync_serial_ioctl(struct inode *, struct file *,
			     unsigned int cmd, unsigned long arg);
static ssize_t sync_serial_write(struct file *file, const char *buf,
				 size_t count, loff_t *ppos);
static ssize_t sync_serial_read(struct file *file, char *buf,
				size_t count, loff_t *ppos);

#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
     defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
     defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
#define SYNC_SER_DMA
#endif

static void send_word(sync_port *port);
static void start_dma_out(struct sync_port *port, const char *data, int count);
static void start_dma_in(sync_port *port);
#ifdef SYNC_SER_DMA
static irqreturn_t tr_interrupt(int irq, void *dev_id);
static irqreturn_t rx_interrupt(int irq, void *dev_id);
#endif

#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
     !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
     !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
#define SYNC_SER_MANUAL
#endif

#ifdef SYNC_SER_MANUAL
static irqreturn_t manual_interrupt(int irq, void *dev_id);
#endif

#ifdef CONFIG_ETRAXFS	/* ETRAX FS */
#define OUT_DMA_NBR		4
#define IN_DMA_NBR		5
#define PINMUX_SSER		pinmux_sser0
#define SYNCSER_INST		regi_sser0
#define SYNCSER_INTR_VECT	SSER0_INTR_VECT
#define OUT_DMA_INST		regi_dma4
#define IN_DMA_INST		regi_dma5
#define DMA_OUT_INTR_VECT	DMA4_INTR_VECT
#define DMA_IN_INTR_VECT	DMA5_INTR_VECT
#define REQ_DMA_SYNCSER		dma_sser0
#else			/* Artpec-3 */
#define OUT_DMA_NBR		6
#define IN_DMA_NBR		7
#define PINMUX_SSER		pinmux_sser
#define SYNCSER_INST		regi_sser
#define SYNCSER_INTR_VECT	SSER_INTR_VECT
#define OUT_DMA_INST		regi_dma6
#define IN_DMA_INST		regi_dma7
#define DMA_OUT_INTR_VECT	DMA6_INTR_VECT
#define DMA_IN_INTR_VECT	DMA7_INTR_VECT
#define REQ_DMA_SYNCSER		dma_sser
#endif

static struct sync_port ports[] = {
	{
		.regi_sser = SYNCSER_INST,
		.regi_dmaout = OUT_DMA_INST,
		.regi_dmain = IN_DMA_INST,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	},
#ifdef CONFIG_ETRAXFS
	{
		.regi_sser = regi_sser1,
		.regi_dmaout = regi_dma6,
		.regi_dmain = regi_dma7,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	},
#endif
};

#define NBR_PORTS ARRAY_SIZE(ports)

static const struct file_operations sync_serial_fops = {
	.owner   = THIS_MODULE,
	.write   = sync_serial_write,
	.read    = sync_serial_read,
	.poll    = sync_serial_poll,
	.ioctl   = sync_serial_ioctl,
	.open    = sync_serial_open,
	.release = sync_serial_release
};
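
/*
 * Minimal user-space usage sketch for the file operations above.  It is
 * illustrative only: the "/dev/syncser0" node name is an assumption (any
 * character device node with major SYNC_SERIAL_MAJOR and minor 0 or 1 will
 * do), and the ioctl commands and their flag arguments are the ones
 * declared in <asm/sync_serial.h>.
 *
 *	int fd = open("/dev/syncser0", O_RDWR);
 *	if (fd >= 0) {
 *		ioctl(fd, SSP_FRAME_SYNC,
 *		      NORMAL_SYNC | WORD_SIZE_8 | BIT_ORDER_MSB);
 *		write(fd, tx_buf, tx_len);
 *		read(fd, rx_buf, sizeof rx_buf);
 *		close(fd);
 *	}
 *
 * write() may truncate at the end of the ring buffer (check its return
 * value) and read() never returns wrapped data, so both may have to be
 * called repeatedly.
 */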

static int __init etrax_sync_serial_init(void)
{
	ports[0].enabled = 0;
#ifdef CONFIG_ETRAXFS
	ports[1].enabled = 0;
#endif

	if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
			    &sync_serial_fops) < 0) {
		printk(KERN_WARNING
		       "Unable to get major for synchronous serial port\n");
		return -EBUSY;
	}

	/* Initialize Ports */
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
	if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
		printk(KERN_WARNING
		       "Unable to alloc pins for synchronous serial port 0\n");
		return -EIO;
	}
	ports[0].enabled = 1;
	initialize_port(0);
#endif

#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
	if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
		printk(KERN_WARNING
		       "Unable to alloc pins for synchronous serial port 1\n");
		return -EIO;
	}
	ports[1].enabled = 1;
	initialize_port(1);
#endif

#ifdef CONFIG_ETRAXFS
	printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
#else
	printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
#endif
	return 0;
}

static void __init initialize_port(int portnbr)
{
	int __attribute__((unused)) i;
	struct sync_port *port = &ports[portnbr];
	reg_sser_rw_cfg cfg = {0};
	reg_sser_rw_frm_cfg frm_cfg = {0};
	reg_sser_rw_tr_cfg tr_cfg = {0};
	reg_sser_rw_rec_cfg rec_cfg = {0};

	DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));

	port->port_nbr = portnbr;

	port->out_rd_ptr = port->out_buffer;
	port->out_buf_count = 0;

	port->readp = port->flip;
	port->writep = port->flip;
	port->in_buffer_size = IN_BUFFER_SIZE;
	port->inbufchunk = IN_DESCR_SIZE;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
	port->prev_rx_desc->eol = 1;

	init_waitqueue_head(&port->out_wait_q);
	init_waitqueue_head(&port->in_wait_q);

	spin_lock_init(&port->lock);

	cfg.out_clk_src = regk_sser_intern_clk;
	cfg.out_clk_pol = regk_sser_pos;
	cfg.clk_od_mode = regk_sser_no;
	cfg.clk_dir = regk_sser_out;
	cfg.gate_clk = regk_sser_no;
	cfg.base_freq = regk_sser_f29_493;
	REG_WR(sser, port->regi_sser, rw_cfg, cfg);

	frm_cfg.wordrate = DEFAULT_WORD_RATE;
	frm_cfg.type = regk_sser_edge;
	frm_cfg.frame_pin_dir = regk_sser_out;
	frm_cfg.frame_pin_use = regk_sser_frm;
	frm_cfg.status_pin_dir = regk_sser_in;
	frm_cfg.status_pin_use = regk_sser_hold;
	frm_cfg.out_on = regk_sser_tr;
	frm_cfg.tr_delay = 1;
	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);

	tr_cfg.urun_stop = regk_sser_no;
	tr_cfg.sample_size = 7;
	tr_cfg.sh_dir = regk_sser_msbfirst;
	tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
#if 0
	tr_cfg.rate_ctrl = regk_sser_bulk;
	tr_cfg.data_pin_use = regk_sser_dout;
#else
	tr_cfg.rate_ctrl = regk_sser_iso;
	tr_cfg.data_pin_use = regk_sser_dout;
#endif
	tr_cfg.bulk_wspace = 1;
	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);

	rec_cfg.sample_size = 7;
	rec_cfg.sh_dir = regk_sser_msbfirst;
	rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
	rec_cfg.fifo_thr = regk_sser_inf;
	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);

	/* Setup the descriptor ring for dma out/transmit. */
	for (i = 0; i < NBR_OUT_DESCR; i++) {
		port->out_descr[i].wait = 0;
		port->out_descr[i].intr = 1;
		port->out_descr[i].eol = 0;
		port->out_descr[i].out_eop = 0;
		port->out_descr[i].next =
			(dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
	}

	/* Create a ring from the list. */
	port->out_descr[NBR_OUT_DESCR-1].next =
		(dma_descr_data *)virt_to_phys(&port->out_descr[0]);
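
	/* Initially the ring is empty: active_tr_descr (the next descriptor
	 * to fill) and catch_tr_descr (the oldest descriptor the DMA has not
	 * yet completed) both point at out_descr[0], while prev_tr_descr,
	 * the descriptor carrying the end-of-list flag, points at the last
	 * descriptor. */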

	/* Setup context for traversing the ring. */
	port->active_tr_descr = &port->out_descr[0];
	port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
	port->catch_tr_descr = &port->out_descr[0];
}

static inline int sync_data_avail(struct sync_port *port)
{
	int avail;
	unsigned char *start;
	unsigned char *end;

	start = (unsigned char *)port->readp;	/* cast away volatile */
	end = (unsigned char *)port->writep;	/* cast away volatile */
	/* 0123456789  0123456789 */
	if (end >= start)
		avail = end - start;
	else
		avail = port->in_buffer_size - (start - end);
	return avail;
}

static inline int sync_data_avail_to_end(struct sync_port *port)
{
	int avail;
	unsigned char *start;
	unsigned char *end;

	start = (unsigned char *)port->readp;	/* cast away volatile */
	end = (unsigned char *)port->writep;	/* cast away volatile */
	/* 0123456789  0123456789 */
	if (end >= start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;
	return avail;
}

static int sync_serial_open(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);
	sync_port *port;
	reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
	reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};

	DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	/* Allow open this device twice (assuming one reader and one writer) */
	if (port->busy == 2) {
		DEBUG(printk(KERN_DEBUG "Device is busy..\n"));
		return -EBUSY;
	}

	if (port->init_irqs) {
		if (port->use_dma) {
			if (port == &ports[0]) {
				if (request_irq(DMA_OUT_INTR_VECT,
						tr_interrupt,
						0,
						"synchronous serial 0 dma tr",
						&ports[0])) {
					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
					return -EBUSY;
				} else if (request_irq(DMA_IN_INTR_VECT,
						rx_interrupt,
						0,
						"synchronous serial 0 dma rx",
						&ports[0])) {
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
					return -EBUSY;
				} else if (crisv32_request_dma(OUT_DMA_NBR,
						"synchronous serial 0 dma tr",
						DMA_VERBOSE_ON_ERROR,
						0,
						REQ_DMA_SYNCSER)) {
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					free_irq(DMA_IN_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
					return -EBUSY;
				} else if (crisv32_request_dma(IN_DMA_NBR,
						"synchronous serial 0 dma rec",
						DMA_VERBOSE_ON_ERROR,
						0,
						REQ_DMA_SYNCSER)) {
					crisv32_free_dma(OUT_DMA_NBR);
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					free_irq(DMA_IN_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
					return -EBUSY;
				}
			}
#ifdef CONFIG_ETRAXFS
			else if (port == &ports[1]) {
				if (request_irq(DMA6_INTR_VECT,
						tr_interrupt,
						0,
						"synchronous serial 1 dma tr",
						&ports[1])) {
					printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
					return -EBUSY;
				} else if (request_irq(DMA7_INTR_VECT,
						rx_interrupt,
						0,
						"synchronous serial 1 dma rx",
						&ports[1])) {
					free_irq(DMA6_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
					return -EBUSY;
				} else if (crisv32_request_dma(
						SYNC_SER1_TX_DMA_NBR,
						"synchronous serial 1 dma tr",
						DMA_VERBOSE_ON_ERROR,
						0,
						dma_sser1)) {
					free_irq(DMA6_INTR_VECT, &ports[1]);
					free_irq(DMA7_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
					return -EBUSY;
				} else if (crisv32_request_dma(
						SYNC_SER1_RX_DMA_NBR,
						"synchronous serial 1 dma rec",
						DMA_VERBOSE_ON_ERROR,
						0,
						dma_sser1)) {
					crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
					free_irq(DMA6_INTR_VECT, &ports[1]);
					free_irq(DMA7_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
					return -EBUSY;
				}
			}
#endif

			REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
			REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
			/* Enable DMA IRQs */
			REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
			REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
			/* Set up wordsize = 1 for DMAs. */
			DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
			DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);
		} else {	/* !port->use_dma */
#ifdef SYNC_SER_MANUAL
			if (port == &ports[0]) {
				if (request_irq(SYNCSER_INTR_VECT,
						manual_interrupt,
						0,
						"synchronous serial manual irq",
						&ports[0])) {
					printk(KERN_CRIT "Can't allocate sync serial manual irq");
					return -EBUSY;
				}
			}
#ifdef CONFIG_ETRAXFS
			else if (port == &ports[1]) {
				if (request_irq(SSER1_INTR_VECT,
						manual_interrupt,
						0,
						"synchronous serial manual irq",
						&ports[1])) {
					printk(KERN_CRIT "Can't allocate sync serial manual irq");
					return -EBUSY;
				}
			}
#endif
#else
			panic("sync_serial: Manual mode not supported.\n");
#endif /* SYNC_SER_MANUAL */
		}
	} /* port->init_irqs */

	port->busy++;
	return 0;
}

static int sync_serial_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);
	sync_port *port;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];
	if (port->busy)
		port->busy--;
	return 0;
}

static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	unsigned int mask = 0;
	sync_port *port;
	DEBUGPOLL(static unsigned int prev_mask = 0;);

	port = &ports[dev];

	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg =
			REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	poll_wait(file, &port->out_wait_q, wait);
	poll_wait(file, &port->in_wait_q, wait);

	/* No active transfer, descriptors are available */
	if (port->output && !port->tr_running)
		mask |= POLLOUT | POLLWRNORM;

	/* Descriptor and buffer space available. */
	if (port->output &&
	    port->active_tr_descr != port->catch_tr_descr &&
	    port->out_buf_count < OUT_BUFFER_SIZE)
		mask |= POLLOUT | POLLWRNORM;

	/* At least an inbufchunk of data */
	if (port->input && sync_data_avail(port) >= port->inbufchunk)
		mask |= POLLIN | POLLRDNORM;

	DEBUGPOLL(if (mask != prev_mask)
		      printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
			     mask & POLLOUT ? "POLLOUT" : "",
			     mask & POLLIN ? "POLLIN" : "");
		  prev_mask = mask;);
	return mask;
}

static int sync_serial_ioctl(struct inode *inode, struct file *file,
			     unsigned int cmd, unsigned long arg)
{
	int dma_w_size = regk_dma_set_w_size1;
	int dev = iminor(file->f_path.dentry->d_inode);
	sync_port *port;
	reg_sser_rw_tr_cfg tr_cfg;
	reg_sser_rw_rec_cfg rec_cfg;
	reg_sser_rw_frm_cfg frm_cfg;
	reg_sser_rw_cfg gen_cfg;
	reg_sser_rw_intr_mask intr_mask;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	spin_lock_irq(&port->lock);

	tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
	rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
	frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
	gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
	intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);

	if (GET_SPEED(arg) == CODEC) {
		int freq;

		gen_cfg.base_freq = regk_sser_f32;

		/* Clock divider will internally be
		 * gen_cfg.clk_div + 1.
		 */

		freq = GET_FREQ(arg);

		gen_cfg.clk_div = 125 *
			(1 << (freq - FREQ_256kHz)) - 1;

		gen_cfg.clk_div = 62;

		gen_cfg.clk_div = 8 * (1 << freq) - 1;
	} else {
		gen_cfg.base_freq = regk_sser_f29_493;
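
		/* Worked example (derived from the formulas used below, not
		 * from the hardware documentation): the resulting clock is
		 * base_freq / (clk_div + 1).  With the 29.493 MHz base,
		 * 29493000 / (115200 * 8) - 1 = 31, so the clock becomes
		 * 29.493 MHz / 32, about 921.6 kHz, i.e. 115200 words of
		 * 8 bits per second. */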
		switch (GET_SPEED(arg)) {
			gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
			gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;

			gen_cfg.base_freq = regk_sser_f100;
			gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
		}
	}

	frm_cfg.wordrate = GET_WORD_RATE(arg);

	frm_cfg.out_on = regk_sser_tr;
	frm_cfg.frame_pin_dir = regk_sser_out;
	gen_cfg.clk_dir = regk_sser_out;

	frm_cfg.frame_pin_dir = regk_sser_in;
	gen_cfg.clk_dir = regk_sser_in;

	frm_cfg.frame_pin_dir = regk_sser_out;
	frm_cfg.out_on = regk_sser_intern_tb;
	gen_cfg.clk_dir = regk_sser_out;

	frm_cfg.frame_pin_dir = regk_sser_in;
	gen_cfg.clk_dir = regk_sser_in;

	frm_cfg.frame_pin_dir = regk_sser_out;
	frm_cfg.out_on = regk_sser_intern_tb;
	gen_cfg.clk_dir = regk_sser_out;

	frm_cfg.frame_pin_dir = regk_sser_in;
	gen_cfg.clk_dir = regk_sser_in;

	spin_unlock_irq(&port->lock);

	if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
		intr_mask.rdav = regk_sser_yes;

	if (arg & NORMAL_SYNC) {
		frm_cfg.rec_delay = 1;
		frm_cfg.tr_delay = 1;
	}
	else if (arg & EARLY_SYNC)
		frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
	else if (arg & SECOND_WORD_SYNC) {
		frm_cfg.rec_delay = 7;
		frm_cfg.tr_delay = 1;
	}

	tr_cfg.bulk_wspace = frm_cfg.tr_delay;
	frm_cfg.early_wend = regk_sser_yes;

	if (arg & BIT_SYNC)
		frm_cfg.type = regk_sser_edge;
	else if (arg & WORD_SYNC)
		frm_cfg.type = regk_sser_level;
	else if (arg & EXTENDED_SYNC)
		frm_cfg.early_wend = regk_sser_no;

	if (arg & SYNC_ON)
		frm_cfg.frame_pin_use = regk_sser_frm;
	else if (arg & SYNC_OFF)
		frm_cfg.frame_pin_use = regk_sser_gio0;

	dma_w_size = regk_dma_set_w_size2;
	if (arg & WORD_SIZE_8) {
		rec_cfg.sample_size = tr_cfg.sample_size = 7;
		dma_w_size = regk_dma_set_w_size1;
	} else if (arg & WORD_SIZE_12)
		rec_cfg.sample_size = tr_cfg.sample_size = 11;
	else if (arg & WORD_SIZE_16)
		rec_cfg.sample_size = tr_cfg.sample_size = 15;
	else if (arg & WORD_SIZE_24)
		rec_cfg.sample_size = tr_cfg.sample_size = 23;
	else if (arg & WORD_SIZE_32)
		rec_cfg.sample_size = tr_cfg.sample_size = 31;
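
	/* Note: sample_size is programmed as the word length minus one
	 * (7 for 8-bit words up to 31 for 32-bit words).  Only 8-bit words
	 * are moved by the DMA one byte at a time; every wider word size
	 * keeps the two-byte DMA word size selected above. */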

	if (arg & BIT_ORDER_MSB)
		rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
	else if (arg & BIT_ORDER_LSB)
		rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;

	if (arg & FLOW_CONTROL_ENABLE) {
		frm_cfg.status_pin_use = regk_sser_frm;
		rec_cfg.fifo_thr = regk_sser_thr16;
	} else if (arg & FLOW_CONTROL_DISABLE) {
		frm_cfg.status_pin_use = regk_sser_gio0;
		rec_cfg.fifo_thr = regk_sser_inf;
	}

	if (arg & CLOCK_NOT_GATED)
		gen_cfg.gate_clk = regk_sser_no;
	else if (arg & CLOCK_GATED)
		gen_cfg.gate_clk = regk_sser_yes;

	/* NOTE!! negedge is considered NORMAL */
	if (arg & CLOCK_NORMAL)
		rec_cfg.clk_pol = regk_sser_neg;
	else if (arg & CLOCK_INVERT)
		rec_cfg.clk_pol = regk_sser_pos;

	if (arg & FRAME_NORMAL)
		frm_cfg.level = regk_sser_pos_hi;
	else if (arg & FRAME_INVERT)
		frm_cfg.level = regk_sser_neg_lo;

	if (arg & STATUS_NORMAL)
		gen_cfg.hold_pol = regk_sser_pos;
	else if (arg & STATUS_INVERT)
		gen_cfg.hold_pol = regk_sser_neg;

	if (arg & CLOCK_NORMAL)
		gen_cfg.out_clk_pol = regk_sser_pos;
	else if (arg & CLOCK_INVERT)
		gen_cfg.out_clk_pol = regk_sser_neg;

	if (arg & FRAME_NORMAL)
		frm_cfg.level = regk_sser_pos_hi;
	else if (arg & FRAME_INVERT)
		frm_cfg.level = regk_sser_neg_lo;

	if (arg & STATUS_NORMAL)
		gen_cfg.hold_pol = regk_sser_pos;
	else if (arg & STATUS_INVERT)
		gen_cfg.hold_pol = regk_sser_neg;

	rec_cfg.fifo_thr = regk_sser_inf;
	rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
	rec_cfg.sample_size = tr_cfg.sample_size = 7;
	frm_cfg.frame_pin_use = regk_sser_frm;
	frm_cfg.type = regk_sser_level;
	frm_cfg.tr_delay = 1;
	frm_cfg.level = regk_sser_neg_lo;

	rec_cfg.clk_pol = regk_sser_neg;
	gen_cfg.clk_dir = regk_sser_in;

	gen_cfg.out_clk_pol = regk_sser_pos;

	gen_cfg.clk_dir = regk_sser_out;

	rec_cfg.rec_en = port->input;
	gen_cfg.en = (port->output | port->input);

	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
	REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
	REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);

	if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
			WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {

		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
		/* ##### Should DMA be stopped before we change dma size? */
		DMA_WR_CMD(port->regi_dmain, dma_w_size);
		DMA_WR_CMD(port->regi_dmaout, dma_w_size);

		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
	}

	spin_unlock_irq(&port->lock);
	return 0;
}

/* NOTE: sync_serial_write does not support concurrency */
static ssize_t sync_serial_write(struct file *file, const char *buf,
				 size_t count, loff_t *ppos)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	DECLARE_WAITQUEUE(wait, current);
	struct sync_port *port;
	int trunc_count;
	unsigned long flags;
	int bytes_free;
	int out_buf_count;

	unsigned char *rd_ptr;       /* First allocated byte in the buffer */
	unsigned char *wr_ptr;       /* First free byte in the buffer */
	unsigned char *buf_stop_ptr; /* Last byte + 1 */

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	/* |<-               OUT_BUFFER_SIZE                    ->|
	 * |<-                out_buf_count              ->|
	 * |<-  trunc_count ->| ...->|
	 *  ______________________________________________________
	 * |  free   |   data                        |   free     |
	 * |_________|_______________________________|____________|
	 */
	DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
			  port->port_nbr, count, port->active_tr_descr,
			  port->catch_tr_descr));

	/* Read variables that may be updated by interrupts */
	spin_lock_irqsave(&port->lock, flags);
	rd_ptr = port->out_rd_ptr;
	out_buf_count = port->out_buf_count;
	spin_unlock_irqrestore(&port->lock, flags);

	/* Check if resources are available */
	if (port->tr_running &&
	    ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
	     out_buf_count >= OUT_BUFFER_SIZE)) {
		DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
		return -EAGAIN;
	}

	buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;

	/* Determine pointer to the first free byte, before copying. */
	wr_ptr = rd_ptr + out_buf_count;
	if (wr_ptr >= buf_stop_ptr)
		wr_ptr -= OUT_BUFFER_SIZE;

	/* If we wrap the ring buffer, let the user space program handle it by
	 * truncating the data. This could be more elegant, small buffer
	 * fragments may occur. */
	bytes_free = OUT_BUFFER_SIZE - out_buf_count;
	if (wr_ptr + bytes_free > buf_stop_ptr)
		bytes_free = buf_stop_ptr - wr_ptr;
	trunc_count = (count < bytes_free) ? count : bytes_free;

	if (copy_from_user(wr_ptr, buf, trunc_count))
		return -EFAULT;

	DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d  %p %p %p\n",
			   out_buf_count, trunc_count,
			   port->out_buf_count, port->out_buffer,
			   wr_ptr, buf_stop_ptr));

	/* Make sure transmitter/receiver is running */
	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Setup wait if blocking */
	if (!(file->f_flags & O_NONBLOCK)) {
		add_wait_queue(&port->out_wait_q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	spin_lock_irqsave(&port->lock, flags);
	port->out_buf_count += trunc_count;
	if (port->use_dma) {
		start_dma_out(port, wr_ptr, trunc_count);
	} else if (!port->tr_running) {
		reg_sser_rw_intr_mask intr_mask;
		intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
		/* Start sender by writing data */
		send_word(port);
		/* and enable transmitter ready IRQ */
		intr_mask.trdy = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
		port->tr_running = 1;
	}
	spin_unlock_irqrestore(&port->lock, flags);

	/* Exit if non blocking */
	if (file->f_flags & O_NONBLOCK) {
		DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
				  port->port_nbr, trunc_count,
				  REG_RD_INT(dma, port->regi_dmaout, r_intr)));
		return trunc_count;
	}

	schedule();
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->out_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
			  port->port_nbr, trunc_count));
	return trunc_count;
}

static ssize_t sync_serial_read(struct file *file, char *buf,
				size_t count, loff_t *ppos)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	int avail;
	sync_port *port;
	unsigned char *start;
	unsigned char *end;
	unsigned long flags;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n",
			 dev, count,
			 port->readp - port->flip,
			 port->writep - port->flip,
			 port->in_buffer_size));

	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		tr_cfg.tr_en = regk_sser_yes;
		rec_cfg.rec_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Calculate number of available bytes */
	/* Save pointers to avoid that they are modified by interrupt */
	spin_lock_irqsave(&port->lock, flags);
	start = (unsigned char *)port->readp;	/* cast away volatile */
	end = (unsigned char *)port->writep;	/* cast away volatile */
	spin_unlock_irqrestore(&port->lock, flags);
	while ((start == end) && !port->full) {	/* No data */
		DEBUGREAD(printk(KERN_DEBUG "&"));
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		interruptible_sleep_on(&port->in_wait_q);
		if (signal_pending(current))
			return -EINTR;

		spin_lock_irqsave(&port->lock, flags);
		start = (unsigned char *)port->readp;	/* cast away volatile */
		end = (unsigned char *)port->writep;	/* cast away volatile */
		spin_unlock_irqrestore(&port->lock, flags);
	}

	/* Lazy read, never return wrapped data. */
	if (port->full)
		avail = port->in_buffer_size;
	else if (end > start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;

	count = count > avail ? avail : count;
	if (copy_to_user(buf, start, count))
		return -EFAULT;

	/* Disable interrupts while updating readp */
	spin_lock_irqsave(&port->lock, flags);
	port->readp += count;
	if (port->readp >= port->flip + port->in_buffer_size)	/* Wrap? */
		port->readp = port->flip;
	port->full = 0;
	spin_unlock_irqrestore(&port->lock, flags);
	DEBUGREAD(printk("r %d\n", count));
	return count;
}

static void send_word(sync_port *port)
{
	reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
	reg_sser_rw_tr_data tr_data = {0};
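
	/* Pop the next word from out_buffer and write it to rw_tr_data.
	 * As the cases below show, a 24-bit word is sent as one 16-bit write
	 * followed by one 8-bit write and a 32-bit word as two 16-bit writes,
	 * while out_rd_ptr wraps back to the start of out_buffer whenever it
	 * reaches the end. */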

	switch (tr_cfg.sample_size)
	{
		port->out_buf_count--;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;

	{
		int data = (*port->out_rd_ptr++) << 8;
		data |= *port->out_rd_ptr++;
		port->out_buf_count -= 2;
		tr_data.data = data;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
	}

		port->out_buf_count -= 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;

		port->out_buf_count -= 3;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;

		port->out_buf_count -= 4;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
	}
}

static void start_dma_out(struct sync_port *port,
			  const char *data, int count)
{
	port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
	port->active_tr_descr->after = port->active_tr_descr->buf + count;
	port->active_tr_descr->intr = 1;

	port->active_tr_descr->eol = 1;
	port->prev_tr_descr->eol = 0;
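
	/* The end-of-list marker is moved by first setting eol on the newly
	 * filled descriptor and only then clearing it on the previous one,
	 * presumably so the DMA never sees the ring without an end-of-list
	 * while the list is being extended. */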

	DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
			  port->prev_tr_descr, port->active_tr_descr));
	port->prev_tr_descr = port->active_tr_descr;
	port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);

	if (!port->tr_running) {
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
			rw_tr_cfg);

		port->out_context.next = 0;
		port->out_context.saved_data =
			(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
		port->out_context.saved_data_buf = port->prev_tr_descr->buf;

		DMA_START_CONTEXT(port->regi_dmaout,
				  virt_to_phys((char *)&port->out_context));

		tr_cfg.tr_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
	} else {
		DMA_CONTINUE_DATA(port->regi_dmaout);
		DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
	}

	port->tr_running = 1;
}

static void start_dma_in(sync_port *port)
{
	int i;
	char *buf;

	port->writep = port->flip;

	if (port->writep > port->flip + port->in_buffer_size) {
		panic("Offset too large in sync serial driver\n");
	}
	buf = (char *)virt_to_phys(port->in_buffer);
	for (i = 0; i < NBR_IN_DESCR; i++) {
		port->in_descr[i].buf = buf;
		port->in_descr[i].after = buf + port->inbufchunk;
		port->in_descr[i].intr = 1;
		port->in_descr[i].next =
			(dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
		port->in_descr[i].buf = buf;
		buf += port->inbufchunk;
	}
	/* Link the last descriptor to the first */
	port->in_descr[i-1].next =
		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
	port->in_descr[i-1].eol = regk_sser_yes;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
	port->in_context.saved_data =
		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
	port->in_context.saved_data_buf = port->in_descr[0].buf;
	DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
}

#ifdef SYNC_SER_DMA
static irqreturn_t tr_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
	reg_dma_rw_stat stat;
	int i;
	int found = 0;

	for (i = 0; i < NBR_PORTS; i++) {
		sync_port *port = &ports[i];
		int stop = 0;

		if (!port->enabled || !port->use_dma)
			continue;

		/* IRQ active for the port? */
		masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
		if (!masked.data)
			continue;

		found = 1;

		/* Check if we should stop the DMA transfer */
		stat = REG_RD(dma, port->regi_dmaout, rw_stat);
		if (stat.list_state == regk_dma_data_at_eol)
			stop = 1;

		REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);

		if (!stop) {
			/* The DMA has completed a descriptor, EOL was not
			 * encountered, so step relevant descriptor and
			 * datapointers forward. */
			int sent;
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
					  "in descr %p (ac: %p)\n",
					  port->out_buf_count, sent,
					  port->out_buf_count - sent,
					  port->catch_tr_descr,
					  port->active_tr_descr););
			port->out_buf_count -= sent;
			port->catch_tr_descr =
				phys_to_virt((int) port->catch_tr_descr->next);
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->buf);
		} else {
			int i = 0, sent;
			/* Note that if an EOL was encountered during the irq
			 * locked section of sync_ser_write the DMA will be
			 * restarted and the eol flag will be cleared.
			 * The remaining descriptors will be traversed by
			 * the descriptor interrupts as usual.
			 */
			while (!port->catch_tr_descr->eol) {
				sent = port->catch_tr_descr->after -
					port->catch_tr_descr->buf;
				DEBUGOUTBUF(printk(KERN_DEBUG
					"traversing descr %p -%d (%d)\n",
					port->catch_tr_descr,
					sent,
					port->out_buf_count));
				port->out_buf_count -= sent;
				port->catch_tr_descr = phys_to_virt(
					(int)port->catch_tr_descr->next);
				i++;
				if (i >= NBR_OUT_DESCR) {
					/* TODO: Reset and recover */
					panic("sync_serial: missing eol");
				}
			}
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGOUTBUF(printk(KERN_DEBUG
				"eol at descr %p -%d (%d)\n",
				port->catch_tr_descr,
				sent,
				port->out_buf_count));

			port->out_buf_count -= sent;

			/* Update read pointer to first free byte, we
			 * may already be writing data there. */
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->after);
			if (port->out_rd_ptr > port->out_buffer +
					OUT_BUFFER_SIZE)
				port->out_rd_ptr = port->out_buffer;

			{
				reg_sser_rw_tr_cfg tr_cfg =
					REG_RD(sser, port->regi_sser, rw_tr_cfg);
				DEBUGTXINT(printk(KERN_DEBUG
					"tr_int DMA stop %d, set catch @ %p\n",
					port->out_buf_count,
					port->active_tr_descr));
				if (port->out_buf_count != 0)
					printk(KERN_CRIT "sync_ser: buffer not "
						"empty after eol.\n");
				port->catch_tr_descr = port->active_tr_descr;
				port->tr_running = 0;
				tr_cfg.tr_en = regk_sser_no;
				REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
			}
		}
		/* wake up the waiting process */
		wake_up_interruptible(&port->out_wait_q);
	}
	return IRQ_RETVAL(found);
} /* tr_interrupt */

static irqreturn_t rx_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
	int i;
	int found = 0;

	for (i = 0; i < NBR_PORTS; i++) {
		sync_port *port = &ports[i];

		if (!port->enabled || !port->use_dma)
			continue;

		masked = REG_RD(dma, port->regi_dmain, r_masked_intr);

		if (masked.data) {	/* Descriptor interrupt */
			found = 1;
			while (REG_RD(dma, port->regi_dmain, rw_data) !=
			       virt_to_phys(port->next_rx_desc)) {
				DEBUGRXINT(printk(KERN_DEBUG "!"));
				if (port->writep + port->inbufchunk >
				    port->flip + port->in_buffer_size) {
					int first_size = port->flip +
						port->in_buffer_size - port->writep;
					memcpy((char *)port->writep,
					       phys_to_virt((unsigned)port->next_rx_desc->buf),
					       first_size);
					memcpy(port->flip,
					       phys_to_virt((unsigned)port->next_rx_desc->buf + first_size),
					       port->inbufchunk - first_size);
					port->writep = port->flip +
						port->inbufchunk - first_size;
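
					/* The chunk straddles the end of the
					 * flip buffer, so it is copied in two
					 * pieces.  For example, with
					 * in_buffer_size = 12288 and
					 * inbufchunk = 256, if writep is 100
					 * bytes from the end, the first 100
					 * bytes go to the tail of flip, the
					 * remaining 156 bytes go to its start,
					 * and writep ends up at flip + 156. */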
				} else {
					memcpy((char *)port->writep,
					       phys_to_virt((unsigned)port->next_rx_desc->buf),
					       port->inbufchunk);
					port->writep += port->inbufchunk;
					if (port->writep >= port->flip + port->in_buffer_size)
						port->writep = port->flip;
				}
				if (port->writep == port->readp)
					port->full = 1;

				port->next_rx_desc->eol = 1;
				port->prev_rx_desc->eol = 0;
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 0);
				port->prev_rx_desc = port->next_rx_desc;
				port->next_rx_desc =
					phys_to_virt((unsigned)port->next_rx_desc->next);
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 1);
				/* wake up the waiting process */
				wake_up_interruptible(&port->in_wait_q);
				DMA_CONTINUE(port->regi_dmain);
				REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
			}
		}
	}
	return IRQ_RETVAL(found);
} /* rx_interrupt */
#endif /* SYNC_SER_DMA */

#ifdef SYNC_SER_MANUAL
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
	int i;
	int found = 0;
	reg_sser_r_masked_intr masked;

	for (i = 0; i < NBR_PORTS; i++) {
		sync_port *port = &ports[i];

		if (!port->enabled || port->use_dma)
			continue;

		masked = REG_RD(sser, port->regi_sser, r_masked_intr);
		if (masked.rdav) {	/* Data received? */
			reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
			reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
			found = 1;
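
			/* Store the received word at writep: as the cases
			 * below show, a 12-bit word is split into its high
			 * eight and low four bits, a 16-bit word is stored as
			 * one 16-bit value and 24/32-bit words as one 32-bit
			 * value. */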

			switch (rec_cfg.sample_size)
			{
				*port->writep++ = data.data & 0xff;

				*port->writep = (data.data & 0x0ff0) >> 4;
				*(port->writep + 1) = data.data & 0x0f;

				*(unsigned short *)port->writep = data.data;

				*(unsigned int *)port->writep = data.data;

				*(unsigned int *)port->writep = data.data;
			}

			if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
				port->writep = port->flip;
			if (port->writep == port->readp) {
				/* Receive buffer overrun, discard oldest data. */
				if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
					port->readp = port->flip;
			}
			if (sync_data_avail(port) >= port->inbufchunk)
				wake_up_interruptible(&port->in_wait_q); /* Wake up application */
		}
		if (masked.trdy) {	/* Transmitter ready? */
			found = 1;
			if (port->out_buf_count > 0)	/* More data to send */
				send_word(port);
			else {	/* Transmission finished */
				reg_sser_rw_intr_mask intr_mask;
				intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
				intr_mask.trdy = regk_sser_no;
				REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
				wake_up_interruptible(&port->out_wait_q); /* Wake up application */
			}
		}
	}
	return IRQ_RETVAL(found);
}
#endif

module_init(etrax_sync_serial_init);