/*
 * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
 *
 * Copyright (c) 2005 Axis Communications AB
 *
 * Author: Mikael Starvik
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/spinlock.h>

#include <hwregs/reg_rdwr.h>
#include <hwregs/sser_defs.h>
#include <hwregs/dma_defs.h>
#include <hwregs/dma.h>
#include <hwregs/intr_vect_defs.h>
#include <hwregs/intr_vect.h>
#include <hwregs/reg_map.h>
#include <asm/sync_serial.h>
/* The receiver is a bit tricky because of the continuous stream of data. */
/*                                                                         */
/* Three DMA descriptors are linked together. Each DMA descriptor is      */
/* responsible for port->bufchunk of a common buffer.                     */
/*                                                                         */
/*   +---------------------------------------------+                      */
/*   |  +----------+   +----------+   +----------+ |                      */
/*   +->| Descr[0] |-->| Descr[1] |-->| Descr[2] |-+                      */
/*      +----------+   +----------+   +----------+                        */
/*                                                                         */
/*   +-------------------------------------+                              */
/*   +-------------------------------------+                              */
/*   |<- data_avail ->|                                                   */
/*                                                                         */
/* If the application keeps up the pace readp will be right after writep. */
/* If the application can't keep the pace we have to throw away data.     */
/* The idea is that readp should be ready with the data pointed out by    */
/* Descr[i] when the DMA has filled in Descr[i+1]. Otherwise we will      */
/* discard the rest of the data pointed out by Descr[i] and set readp to  */
/* the start of Descr[i+1].                                               */
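
/*
 * Illustrative sketch (not part of the driver logic; the names follow
 * the sync_port struct below): with readp and writep both wrapping
 * inside the flip buffer, the number of unread bytes is computed
 * roughly as follows, which is what sync_data_avail() further down
 * implements:
 *
 *	if (writep >= readp)
 *		avail = writep - readp;
 *	else
 *		avail = in_buffer_size - (readp - writep);
 */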
#define SYNC_SERIAL_MAJOR 125
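
/*
 * Usage sketch (illustrative only; the device node names are an
 * assumption, not taken from this file): with the fixed major number
 * above, character device nodes for port 0 and port 1 could be created
 * from user space with e.g.
 *
 *	mknod /dev/syncser0 c 125 0
 *	mknod /dev/syncser1 c 125 1
 */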
/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
/* words can be handled */
#define IN_BUFFER_SIZE 12288
#define IN_DESCR_SIZE 256
#define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
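
/*
 * Illustrative compile-time guard (a sketch, not present in the
 * original driver): the 24-bit constraint described above could be
 * enforced at build time with
 *
 *	#if (IN_BUFFER_SIZE % 6) != 0
 *	#error "IN_BUFFER_SIZE must be a multiple of 6 (24-bit words)"
 *	#endif
 */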
#define OUT_BUFFER_SIZE 1024*8
#define NBR_OUT_DESCR 8

#define DEFAULT_FRAME_RATE 0
#define DEFAULT_WORD_RATE 7
/* NOTE: Enabling some debug will likely cause overrun or underrun,
 * especially if manual mode is used.
 */

#define DEBUGOUTBUF(x)
typedef struct sync_port {
	reg_scope_instances regi_sser;
	reg_scope_instances regi_dmain;
	reg_scope_instances regi_dmaout;

	char started;		/* 1 if port has been started */
	char port_nbr;		/* Port 0 or 1 */
	char busy;		/* 1 if port is busy */

	char enabled;		/* 1 if port is enabled */
	char use_dma;		/* 1 if port uses dma */

	/* Next byte to be read by application */
	volatile unsigned char *volatile readp;
	/* Next byte to be written by etrax */
	volatile unsigned char *volatile writep;

	unsigned int in_buffer_size;
	unsigned int inbufchunk;
	unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
	unsigned char in_buffer[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
	unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
	struct dma_descr_data *next_rx_desc;
	struct dma_descr_data *prev_rx_desc;

	/* Pointer to the first available descriptor in the ring,
	 * unless active_tr_descr == catch_tr_descr and a dma
	 * transfer is active */
	struct dma_descr_data *active_tr_descr;

	/* Pointer to the first allocated descriptor in the ring */
	struct dma_descr_data *catch_tr_descr;

	/* Pointer to the descriptor with the current end-of-list */
	struct dma_descr_data *prev_tr_descr;

	/* Pointer to the first byte being read by DMA
	 * or current position in out_buffer if not using DMA. */
	unsigned char *out_rd_ptr;

	/* Number of bytes currently locked for being read by DMA */
	int out_buf_count;

	dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
	dma_descr_context in_context __attribute__ ((__aligned__(32)));
	dma_descr_data out_descr[NBR_OUT_DESCR]
		__attribute__ ((__aligned__(16)));
	dma_descr_context out_context __attribute__ ((__aligned__(32)));
	wait_queue_head_t out_wait_q;
	wait_queue_head_t in_wait_q;
} sync_port;
static int etrax_sync_serial_init(void);
static void initialize_port(int portnbr);
static inline int sync_data_avail(struct sync_port *port);

static int sync_serial_open(struct inode *, struct file *);
static int sync_serial_release(struct inode *, struct file *);
static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);

static int sync_serial_ioctl(struct inode *, struct file *,
			     unsigned int cmd, unsigned long arg);
static ssize_t sync_serial_write(struct file *file, const char *buf,
				 size_t count, loff_t *ppos);
static ssize_t sync_serial_read(struct file *file, char *buf,
				size_t count, loff_t *ppos);
#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
	defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
	defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
#define SYNC_SER_DMA
#endif

static void send_word(sync_port *port);
static void start_dma_out(struct sync_port *port, const char *data, int count);
static void start_dma_in(sync_port *port);

static irqreturn_t tr_interrupt(int irq, void *dev_id);
static irqreturn_t rx_interrupt(int irq, void *dev_id);

#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
	!defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
	!defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
#define SYNC_SER_MANUAL
#endif

#ifdef SYNC_SER_MANUAL
static irqreturn_t manual_interrupt(int irq, void *dev_id);
#endif
#ifdef CONFIG_ETRAXFS	/* ETRAX FS */
#define OUT_DMA_NBR		4
#define IN_DMA_NBR		5
#define PINMUX_SSER		pinmux_sser0
#define SYNCSER_INST		regi_sser0
#define SYNCSER_INTR_VECT	SSER0_INTR_VECT
#define OUT_DMA_INST		regi_dma4
#define IN_DMA_INST		regi_dma5
#define DMA_OUT_INTR_VECT	DMA4_INTR_VECT
#define DMA_IN_INTR_VECT	DMA5_INTR_VECT
#define REQ_DMA_SYNCSER		dma_sser0
#else			/* Artpec-3 */
#define OUT_DMA_NBR		6
#define IN_DMA_NBR		7
#define PINMUX_SSER		pinmux_sser
#define SYNCSER_INST		regi_sser
#define SYNCSER_INTR_VECT	SSER_INTR_VECT
#define OUT_DMA_INST		regi_dma6
#define IN_DMA_INST		regi_dma7
#define DMA_OUT_INTR_VECT	DMA6_INTR_VECT
#define DMA_IN_INTR_VECT	DMA7_INTR_VECT
#define REQ_DMA_SYNCSER		dma_sser
#endif
static struct sync_port ports[] = {
	{
		.regi_sser	= SYNCSER_INST,
		.regi_dmaout	= OUT_DMA_INST,
		.regi_dmain	= IN_DMA_INST,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
		.use_dma	= 1,
#else
		.use_dma	= 0,
#endif
	},
#ifdef CONFIG_ETRAXFS
	{
		.regi_sser	= regi_sser1,
		.regi_dmaout	= regi_dma6,
		.regi_dmain	= regi_dma7,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
		.use_dma	= 1,
#else
		.use_dma	= 0,
#endif
	},
#endif
};

#define NBR_PORTS ARRAY_SIZE(ports)
static const struct file_operations sync_serial_fops = {
	.owner		= THIS_MODULE,
	.write		= sync_serial_write,
	.read		= sync_serial_read,
	.poll		= sync_serial_poll,
	.ioctl		= sync_serial_ioctl,
	.open		= sync_serial_open,
	.release	= sync_serial_release
};
static int __init etrax_sync_serial_init(void)
{
	ports[0].enabled = 0;
#ifdef CONFIG_ETRAXFS
	ports[1].enabled = 0;
#endif

	if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
			&sync_serial_fops) < 0) {
		printk(KERN_WARNING
			"Unable to get major for synchronous serial port\n");
		return -EBUSY;
	}

	/* Initialize Ports */
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
	if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
		printk(KERN_WARNING
			"Unable to alloc pins for synchronous serial port 0\n");
		return -EIO;
	}
	ports[0].enabled = 1;
	initialize_port(0);
#endif

#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
	if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
		printk(KERN_WARNING
			"Unable to alloc pins for synchronous serial port 1\n");
		return -EIO;
	}
	ports[1].enabled = 1;
	initialize_port(1);
#endif

#ifdef CONFIG_ETRAXFS
	printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
#else
	printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
#endif
	return 0;
}
static void __init initialize_port(int portnbr)
{
	int __attribute__((unused)) i;
	struct sync_port *port = &ports[portnbr];
	reg_sser_rw_cfg cfg = {0};
	reg_sser_rw_frm_cfg frm_cfg = {0};
	reg_sser_rw_tr_cfg tr_cfg = {0};
	reg_sser_rw_rec_cfg rec_cfg = {0};

	DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));

	port->port_nbr = portnbr;

	port->out_rd_ptr = port->out_buffer;
	port->out_buf_count = 0;

	port->readp = port->flip;
	port->writep = port->flip;
	port->in_buffer_size = IN_BUFFER_SIZE;
	port->inbufchunk = IN_DESCR_SIZE;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
	port->prev_rx_desc->eol = 1;

	init_waitqueue_head(&port->out_wait_q);
	init_waitqueue_head(&port->in_wait_q);

	spin_lock_init(&port->lock);

	cfg.out_clk_src = regk_sser_intern_clk;
	cfg.out_clk_pol = regk_sser_pos;
	cfg.clk_od_mode = regk_sser_no;
	cfg.clk_dir = regk_sser_out;
	cfg.gate_clk = regk_sser_no;
	cfg.base_freq = regk_sser_f29_493;
	REG_WR(sser, port->regi_sser, rw_cfg, cfg);

	frm_cfg.wordrate = DEFAULT_WORD_RATE;
	frm_cfg.type = regk_sser_edge;
	frm_cfg.frame_pin_dir = regk_sser_out;
	frm_cfg.frame_pin_use = regk_sser_frm;
	frm_cfg.status_pin_dir = regk_sser_in;
	frm_cfg.status_pin_use = regk_sser_hold;
	frm_cfg.out_on = regk_sser_tr;
	frm_cfg.tr_delay = 1;
	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);

	tr_cfg.urun_stop = regk_sser_no;
	tr_cfg.sample_size = 7;
	tr_cfg.sh_dir = regk_sser_msbfirst;
	tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
#if 0
	tr_cfg.rate_ctrl = regk_sser_bulk;
	tr_cfg.data_pin_use = regk_sser_dout;
#else
	tr_cfg.rate_ctrl = regk_sser_iso;
	tr_cfg.data_pin_use = regk_sser_dout;
#endif
	tr_cfg.bulk_wspace = 1;
	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);

	rec_cfg.sample_size = 7;
	rec_cfg.sh_dir = regk_sser_msbfirst;
	rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
	rec_cfg.fifo_thr = regk_sser_inf;
	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);

	/* Setup the descriptor ring for dma out/transmit. */
	for (i = 0; i < NBR_OUT_DESCR; i++) {
		port->out_descr[i].wait = 0;
		port->out_descr[i].intr = 1;
		port->out_descr[i].eol = 0;
		port->out_descr[i].out_eop = 0;
		port->out_descr[i].next =
			(dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
	}

	/* Create a ring from the list. */
	port->out_descr[NBR_OUT_DESCR-1].next =
		(dma_descr_data *)virt_to_phys(&port->out_descr[0]);

	/* Setup context for traversing the ring. */
	port->active_tr_descr = &port->out_descr[0];
	port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
	port->catch_tr_descr = &port->out_descr[0];
}
static inline int sync_data_avail(struct sync_port *port)
{
	int avail;
	unsigned char *start;
	unsigned char *end;

	start = (unsigned char *)port->readp; /* cast away volatile */
	end = (unsigned char *)port->writep;  /* cast away volatile */
	/* 0123456789  0123456789 */
	if (end >= start)
		avail = end - start;
	else
		avail = port->in_buffer_size - (start - end);
	return avail;
}
static inline int sync_data_avail_to_end(struct sync_port *port)
{
	int avail;
	unsigned char *start;
	unsigned char *end;

	start = (unsigned char *)port->readp; /* cast away volatile */
	end = (unsigned char *)port->writep;  /* cast away volatile */
	/* 0123456789  0123456789 */
	if (end >= start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;
	return avail;
}
static int sync_serial_open(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);
	sync_port *port;
	reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
	reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};

	DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	/* Allow open this device twice (assuming one reader and one writer) */
	if (port->busy == 2) {
		DEBUG(printk(KERN_DEBUG "Device is busy..\n"));
		return -EBUSY;
	}

	if (port->init_irqs) {
		if (port->use_dma) {
			if (port == &ports[0]) {
				if (request_irq(DMA_OUT_INTR_VECT,
						tr_interrupt, 0,
						"synchronous serial 0 dma tr",
						&ports[0])) {
					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
					return -EBUSY;
				} else if (request_irq(DMA_IN_INTR_VECT,
						rx_interrupt, 0,
						"synchronous serial 0 dma rx",
						&ports[0])) {
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
					return -EBUSY;
				} else if (crisv32_request_dma(OUT_DMA_NBR,
						"synchronous serial 0 dma tr",
						DMA_VERBOSE_ON_ERROR, 0,
						REQ_DMA_SYNCSER)) {
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					free_irq(DMA_IN_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
					return -EBUSY;
				} else if (crisv32_request_dma(IN_DMA_NBR,
						"synchronous serial 0 dma rec",
						DMA_VERBOSE_ON_ERROR, 0,
						REQ_DMA_SYNCSER)) {
					crisv32_free_dma(OUT_DMA_NBR);
					free_irq(DMA_OUT_INTR_VECT, &port[0]);
					free_irq(DMA_IN_INTR_VECT, &port[0]);
					printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
					return -EBUSY;
				}
			}
#ifdef CONFIG_ETRAXFS
			else if (port == &ports[1]) {
				if (request_irq(DMA6_INTR_VECT,
						tr_interrupt, 0,
						"synchronous serial 1 dma tr",
						&ports[1])) {
					printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
					return -EBUSY;
				} else if (request_irq(DMA7_INTR_VECT,
						rx_interrupt, 0,
						"synchronous serial 1 dma rx",
						&ports[1])) {
					free_irq(DMA6_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
					return -EBUSY;
				} else if (crisv32_request_dma(
						SYNC_SER1_TX_DMA_NBR,
						"synchronous serial 1 dma tr",
						DMA_VERBOSE_ON_ERROR, 0,
						dma_sser1)) {
					free_irq(DMA6_INTR_VECT, &ports[1]);
					free_irq(DMA7_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
					return -EBUSY;
				} else if (crisv32_request_dma(
						SYNC_SER1_RX_DMA_NBR,
						"synchronous serial 1 dma rec",
						DMA_VERBOSE_ON_ERROR, 0,
						dma_sser1)) {
					crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
					free_irq(DMA6_INTR_VECT, &ports[1]);
					free_irq(DMA7_INTR_VECT, &ports[1]);
					printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
					return -EBUSY;
				}
			}
#endif
			/* Enable DMAs */
			REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
			REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
			/* Enable DMA IRQs */
			REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
			REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
			/* Set up wordsize = 1 for DMAs. */
			DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
			DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);

			start_dma_in(port);
			port->init_irqs = 0;
		} else { /* !port->use_dma */
#ifdef SYNC_SER_MANUAL
			if (port == &ports[0]) {
				if (request_irq(SYNCSER_INTR_VECT,
						manual_interrupt, 0,
						"synchronous serial manual irq",
						&ports[0])) {
					printk("Can't allocate sync serial manual irq");
					return -EBUSY;
				}
			}
#ifdef CONFIG_ETRAXFS
			else if (port == &ports[1]) {
				if (request_irq(SSER1_INTR_VECT,
						manual_interrupt, 0,
						"synchronous serial manual irq",
						&ports[1])) {
					printk(KERN_CRIT "Can't allocate sync serial manual irq");
					return -EBUSY;
				}
			}
#endif
			port->init_irqs = 0;
#else
			panic("sync_serial: Manual mode not supported.\n");
#endif /* SYNC_SER_MANUAL */
		}
	} /* port->init_irqs */

	port->busy++;
	return 0;
}
static int sync_serial_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);
	sync_port *port;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];
	if (port->busy)
		port->busy--;
	return 0;
}
static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	unsigned int mask = 0;
	sync_port *port;
	DEBUGPOLL(static unsigned int prev_mask = 0;);

	port = &ports[dev];

	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg =
			REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	poll_wait(file, &port->out_wait_q, wait);
	poll_wait(file, &port->in_wait_q, wait);

	/* No active transfer, descriptors are available */
	if (port->output && !port->tr_running)
		mask |= POLLOUT | POLLWRNORM;

	/* Descriptor and buffer space available. */
	if (port->output &&
	    port->active_tr_descr != port->catch_tr_descr &&
	    port->out_buf_count < OUT_BUFFER_SIZE)
		mask |= POLLOUT | POLLWRNORM;

	/* At least an inbufchunk of data */
	if (port->input && sync_data_avail(port) >= port->inbufchunk)
		mask |= POLLIN | POLLRDNORM;

	DEBUGPOLL(if (mask != prev_mask)
			printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
				mask & POLLOUT ? "POLLOUT" : "",
				mask & POLLIN ? "POLLIN" : "");
		prev_mask = mask;
	);
	return mask;
}
static int sync_serial_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	int return_val = 0;
	int dma_w_size = regk_dma_set_w_size1;
	int dev = iminor(file->f_path.dentry->d_inode);
	sync_port *port;
	reg_sser_rw_tr_cfg tr_cfg;
	reg_sser_rw_rec_cfg rec_cfg;
	reg_sser_rw_frm_cfg frm_cfg;
	reg_sser_rw_cfg gen_cfg;
	reg_sser_rw_intr_mask intr_mask;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];
	spin_lock_irq(&port->lock);

	tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
	rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
	frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
	gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
	intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);

	switch (cmd) {
	case SSP_SPEED:
		if (GET_SPEED(arg) == CODEC) {
			unsigned int freq;

			gen_cfg.base_freq = regk_sser_f32;

			/* Clock divider will internally be
			 * gen_cfg.clk_div + 1.
			 */

			freq = GET_FREQ(arg);
			switch (freq) {
			case FREQ_32kHz:
			case FREQ_64kHz:
			case FREQ_128kHz:
			case FREQ_256kHz:
				gen_cfg.clk_div = 125 *
					(1 << (freq - FREQ_256kHz)) - 1;
				break;
			case FREQ_512kHz:
				gen_cfg.clk_div = 62;
				break;
			case FREQ_1MHz:
			case FREQ_2MHz:
			case FREQ_4MHz:
				gen_cfg.clk_div = 8 * (1 << freq) - 1;
				break;
			}
		} else {
			gen_cfg.base_freq = regk_sser_f29_493;
			switch (GET_SPEED(arg)) {
			case SSP150:
				gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
				break;
			case SSP300:
				gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
				break;
			case SSP600:
				gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
				break;
			case SSP1200:
				gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
				break;
			case SSP2400:
				gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
				break;
			case SSP4800:
				gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
				break;
			case SSP9600:
				gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
				break;
			case SSP19200:
				gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
				break;
			case SSP28800:
				gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
				break;
			case SSP57600:
				gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
				break;
			case SSP115200:
				gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
				break;
			case SSP230400:
				gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
				break;
			case SSP460800:
				gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
				break;
			case SSP921600:
				gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
				break;
			case SSP3125000:
				gen_cfg.base_freq = regk_sser_f100;
				gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
				break;
			}
		}
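		/*
		 * Worked example (illustrative): with the 29.493 MHz base
		 * clock and 8 clocks per word, 115200 baud gives
		 * clk_div = 29493000 / (115200 * 8) - 1 = 31, so the
		 * hardware divides by clk_div + 1 = 32 as noted above.
		 */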
		frm_cfg.wordrate = GET_WORD_RATE(arg);

		break;
	case SSP_MODE:
		switch (arg) {
		case MASTER_OUTPUT:
			frm_cfg.out_on = regk_sser_tr;
			frm_cfg.frame_pin_dir = regk_sser_out;
			gen_cfg.clk_dir = regk_sser_out;
			break;
		case SLAVE_OUTPUT:
			frm_cfg.frame_pin_dir = regk_sser_in;
			gen_cfg.clk_dir = regk_sser_in;
			break;
		case MASTER_INPUT:
			frm_cfg.frame_pin_dir = regk_sser_out;
			frm_cfg.out_on = regk_sser_intern_tb;
			gen_cfg.clk_dir = regk_sser_out;
			break;
		case SLAVE_INPUT:
			frm_cfg.frame_pin_dir = regk_sser_in;
			gen_cfg.clk_dir = regk_sser_in;
			break;
		case MASTER_BIDIR:
			frm_cfg.frame_pin_dir = regk_sser_out;
			frm_cfg.out_on = regk_sser_intern_tb;
			gen_cfg.clk_dir = regk_sser_out;
			break;
		case SLAVE_BIDIR:
			frm_cfg.frame_pin_dir = regk_sser_in;
			gen_cfg.clk_dir = regk_sser_in;
			break;
		default:
			spin_unlock_irq(&port->lock);
			return -EINVAL;
		}
		if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
			intr_mask.rdav = regk_sser_yes;
		break;
	case SSP_FRAME_SYNC:
		if (arg & NORMAL_SYNC) {
			frm_cfg.rec_delay = 1;
			frm_cfg.tr_delay = 1;
		}
		else if (arg & EARLY_SYNC)
			frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
		else if (arg & SECOND_WORD_SYNC) {
			frm_cfg.rec_delay = 7;
			frm_cfg.tr_delay = 1;
		}

		tr_cfg.bulk_wspace = frm_cfg.tr_delay;
		frm_cfg.early_wend = regk_sser_yes;
		if (arg & BIT_SYNC)
			frm_cfg.type = regk_sser_edge;
		else if (arg & WORD_SYNC)
			frm_cfg.type = regk_sser_level;
		else if (arg & EXTENDED_SYNC)
			frm_cfg.early_wend = regk_sser_no;

		if (arg & SYNC_ON)
			frm_cfg.frame_pin_use = regk_sser_frm;
		else if (arg & SYNC_OFF)
			frm_cfg.frame_pin_use = regk_sser_gio0;

		dma_w_size = regk_dma_set_w_size2;
		if (arg & WORD_SIZE_8) {
			rec_cfg.sample_size = tr_cfg.sample_size = 7;
			dma_w_size = regk_dma_set_w_size1;
		} else if (arg & WORD_SIZE_12)
			rec_cfg.sample_size = tr_cfg.sample_size = 11;
		else if (arg & WORD_SIZE_16)
			rec_cfg.sample_size = tr_cfg.sample_size = 15;
		else if (arg & WORD_SIZE_24)
			rec_cfg.sample_size = tr_cfg.sample_size = 23;
		else if (arg & WORD_SIZE_32)
			rec_cfg.sample_size = tr_cfg.sample_size = 31;

		if (arg & BIT_ORDER_MSB)
			rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
		else if (arg & BIT_ORDER_LSB)
			rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;

		if (arg & FLOW_CONTROL_ENABLE) {
			frm_cfg.status_pin_use = regk_sser_frm;
			rec_cfg.fifo_thr = regk_sser_thr16;
		} else if (arg & FLOW_CONTROL_DISABLE) {
			frm_cfg.status_pin_use = regk_sser_gio0;
			rec_cfg.fifo_thr = regk_sser_inf;
		}

		if (arg & CLOCK_NOT_GATED)
			gen_cfg.gate_clk = regk_sser_no;
		else if (arg & CLOCK_GATED)
			gen_cfg.gate_clk = regk_sser_yes;

		break;
	case SSP_IPOLARITY:
		/* NOTE!! negedge is considered NORMAL */
		if (arg & CLOCK_NORMAL)
			rec_cfg.clk_pol = regk_sser_neg;
		else if (arg & CLOCK_INVERT)
			rec_cfg.clk_pol = regk_sser_pos;

		if (arg & FRAME_NORMAL)
			frm_cfg.level = regk_sser_pos_hi;
		else if (arg & FRAME_INVERT)
			frm_cfg.level = regk_sser_neg_lo;

		if (arg & STATUS_NORMAL)
			gen_cfg.hold_pol = regk_sser_pos;
		else if (arg & STATUS_INVERT)
			gen_cfg.hold_pol = regk_sser_neg;
		break;
	case SSP_OPOLARITY:
		if (arg & CLOCK_NORMAL)
			gen_cfg.out_clk_pol = regk_sser_pos;
		else if (arg & CLOCK_INVERT)
			gen_cfg.out_clk_pol = regk_sser_neg;

		if (arg & FRAME_NORMAL)
			frm_cfg.level = regk_sser_pos_hi;
		else if (arg & FRAME_INVERT)
			frm_cfg.level = regk_sser_neg_lo;

		if (arg & STATUS_NORMAL)
			gen_cfg.hold_pol = regk_sser_pos;
		else if (arg & STATUS_INVERT)
			gen_cfg.hold_pol = regk_sser_neg;
		break;
	case SSP_SPI:
		rec_cfg.fifo_thr = regk_sser_inf;
		rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
		rec_cfg.sample_size = tr_cfg.sample_size = 7;
		frm_cfg.frame_pin_use = regk_sser_frm;
		frm_cfg.type = regk_sser_level;
		frm_cfg.tr_delay = 1;
		frm_cfg.level = regk_sser_neg_lo;
		if (arg & SPI_SLAVE) {
			rec_cfg.clk_pol = regk_sser_neg;
			gen_cfg.clk_dir = regk_sser_in;
		} else {
			gen_cfg.out_clk_pol = regk_sser_pos;
			gen_cfg.clk_dir = regk_sser_out;
		}
		break;
	default:
		return_val = -EINVAL;
	}

	if (port->started) {
		rec_cfg.rec_en = port->input;
		gen_cfg.en = (port->output | port->input);
	}

	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
	REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
	REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);

	if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
			WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
		int en = gen_cfg.en;

		gen_cfg.en = 0;
		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
		/* ##### Should DMA be stopped before we change dma size? */
		DMA_WR_CMD(port->regi_dmain, dma_w_size);
		DMA_WR_CMD(port->regi_dmaout, dma_w_size);

		gen_cfg.en = en;
		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
	}

	spin_unlock_irq(&port->lock);
	return return_val;
}
/* NOTE: sync_serial_write does not support concurrency */
static ssize_t sync_serial_write(struct file *file, const char *buf,
				 size_t count, loff_t *ppos)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	DECLARE_WAITQUEUE(wait, current);
	struct sync_port *port;
	int trunc_count;
	unsigned long flags;
	int bytes_free;
	int out_buf_count;

	unsigned char *rd_ptr;		/* First allocated byte in the buffer */
	unsigned char *wr_ptr;		/* First free byte in the buffer */
	unsigned char *buf_stop_ptr;	/* Last byte + 1 */

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	/* |<-               OUT_BUFFER_SIZE                       ->|
	 *           |<-   out_buf_count   ->|
	 *                               |<-  trunc_count  ->| ...->|
	 *  ______________________________________________________
	 * |  free   |   data              |  free                 |
	 * |_________|_____________________|_______________________|
	 *           ^ rd_ptr              ^ wr_ptr
	 */
	DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
			  port->port_nbr, count, port->active_tr_descr,
			  port->catch_tr_descr));

	/* Read variables that may be updated by interrupts */
	spin_lock_irqsave(&port->lock, flags);
	rd_ptr = port->out_rd_ptr;
	out_buf_count = port->out_buf_count;
	spin_unlock_irqrestore(&port->lock, flags);

	/* Check if resources are available */
	if (port->tr_running &&
	    ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
	     out_buf_count >= OUT_BUFFER_SIZE)) {
		DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
		return -EAGAIN;
	}

	buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;

	/* Determine pointer to the first free byte, before copying. */
	wr_ptr = rd_ptr + out_buf_count;
	if (wr_ptr >= buf_stop_ptr)
		wr_ptr -= OUT_BUFFER_SIZE;

	/* If we wrap the ring buffer, let the user space program handle it by
	 * truncating the data. This could be more elegant, small buffer
	 * fragments may occur.
	 */
	bytes_free = OUT_BUFFER_SIZE - out_buf_count;
	if (wr_ptr + bytes_free > buf_stop_ptr)
		bytes_free = buf_stop_ptr - wr_ptr;
	trunc_count = (count < bytes_free) ? count : bytes_free;

	if (copy_from_user(wr_ptr, buf, trunc_count))
		return -EFAULT;

	DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d    %p %p %p\n",
			   out_buf_count, trunc_count,
			   port->out_buf_count, port->out_buffer,
			   wr_ptr, buf_stop_ptr));

	/* Make sure transmitter/receiver is running */
	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Setup wait if blocking */
	if (!(file->f_flags & O_NONBLOCK)) {
		add_wait_queue(&port->out_wait_q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	spin_lock_irqsave(&port->lock, flags);
	port->out_buf_count += trunc_count;
	if (port->use_dma) {
		start_dma_out(port, wr_ptr, trunc_count);
	} else if (!port->tr_running) {
		reg_sser_rw_intr_mask intr_mask;
		intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
		/* Start sender by writing data */
		send_word(port);
		/* and enable transmitter ready IRQ */
		intr_mask.trdy = 1;
		REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
	}
	spin_unlock_irqrestore(&port->lock, flags);

	/* Exit if non blocking */
	if (file->f_flags & O_NONBLOCK) {
		DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
				  port->port_nbr, trunc_count,
				  REG_RD_INT(dma, port->regi_dmaout, r_intr)));
		return trunc_count;
	}

	schedule();
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->out_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
			  port->port_nbr, trunc_count));
	return trunc_count;
}
static ssize_t sync_serial_read(struct file *file, char *buf,
				size_t count, loff_t *ppos)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	int avail;
	sync_port *port;
	unsigned char *start;
	unsigned char *end;
	unsigned long flags;

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count,
			 port->readp - port->flip,
			 port->writep - port->flip, port->in_buffer_size));

	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		tr_cfg.tr_en = regk_sser_yes;
		rec_cfg.rec_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Calculate number of available bytes */
	/* Save pointers to avoid that they are modified by interrupt */
	spin_lock_irqsave(&port->lock, flags);
	start = (unsigned char *)port->readp; /* cast away volatile */
	end = (unsigned char *)port->writep;  /* cast away volatile */
	spin_unlock_irqrestore(&port->lock, flags);
	while ((start == end) && !port->full) { /* No data */
		DEBUGREAD(printk(KERN_DEBUG "&"));
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		interruptible_sleep_on(&port->in_wait_q);
		if (signal_pending(current))
			return -EINTR;

		spin_lock_irqsave(&port->lock, flags);
		start = (unsigned char *)port->readp; /* cast away volatile */
		end = (unsigned char *)port->writep;  /* cast away volatile */
		spin_unlock_irqrestore(&port->lock, flags);
	}

	/* Lazy read, never return wrapped data. */
	if (port->full)
		avail = port->in_buffer_size;
	else if (end > start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;

	count = count > avail ? avail : count;
	if (copy_to_user(buf, start, count))
		return -EFAULT;

	/* Disable interrupts while updating readp */
	spin_lock_irqsave(&port->lock, flags);
	port->readp += count;
	if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
		port->readp = port->flip;
	port->full = 0;
	spin_unlock_irqrestore(&port->lock, flags);
	DEBUGREAD(printk("r %d\n", count));
	return count;
}
static void send_word(sync_port *port)
{
	reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
	reg_sser_rw_tr_data tr_data = {0};

	switch (tr_cfg.sample_size)
	{
	case 7:		/* 8 bit words */
		port->out_buf_count--;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 11:	/* 12 bit words */
	{
		int data = (*port->out_rd_ptr++) << 8;
		data |= *port->out_rd_ptr++;
		port->out_buf_count -= 2;
		tr_data.data = data;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
	}
	break;
	case 15:	/* 16 bit words */
		port->out_buf_count -= 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 23:	/* 24 bit words */
		port->out_buf_count -= 3;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 31:	/* 32 bit words */
		port->out_buf_count -= 4;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	}
}
#ifdef SYNC_SER_DMA
static void start_dma_out(struct sync_port *port,
			  const char *data, int count)
{
	port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
	port->active_tr_descr->after = port->active_tr_descr->buf + count;
	port->active_tr_descr->intr = 1;

	port->active_tr_descr->eol = 1;
	port->prev_tr_descr->eol = 0;

	DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
			  port->prev_tr_descr, port->active_tr_descr));
	port->prev_tr_descr = port->active_tr_descr;
	port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);

	if (!port->tr_running) {
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
			rw_tr_cfg);

		port->out_context.next = 0;
		port->out_context.saved_data =
			(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
		port->out_context.saved_data_buf = port->prev_tr_descr->buf;

		DMA_START_CONTEXT(port->regi_dmaout,
				  virt_to_phys((char *)&port->out_context));

		tr_cfg.tr_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
	} else {
		DMA_CONTINUE_DATA(port->regi_dmaout);
		DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
	}

	port->tr_running = 1;
}
static void start_dma_in(sync_port *port)
{
	int i;
	char *buf;

	port->writep = port->flip;
	port->readp = port->flip;

	if (port->writep > port->flip + port->in_buffer_size) {
		panic("Offset too large in sync serial driver\n");
	}

	buf = (char *)virt_to_phys(port->in_buffer);
	for (i = 0; i < NBR_IN_DESCR; i++) {
		port->in_descr[i].buf = buf;
		port->in_descr[i].after = buf + port->inbufchunk;
		port->in_descr[i].intr = 1;
		port->in_descr[i].next = (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
		port->in_descr[i].buf = buf;
		buf += port->inbufchunk;
	}
	/* Link the last descriptor to the first */
	port->in_descr[i-1].next = (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
	port->in_descr[i-1].eol = regk_sser_yes;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
	port->in_context.saved_data = (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
	port->in_context.saved_data_buf = port->in_descr[0].buf;
	DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
}
static irqreturn_t tr_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
	reg_dma_rw_stat stat;
	int i;
	int found = 0;
	int stop_dma = 0;

	for (i = 0; i < NBR_PORTS; i++) {
		sync_port *port = &ports[i];
		if (!port->enabled || !port->use_dma)
			continue;

		/* IRQ active for the port? */
		masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);

		if (!masked.data)
			continue;

		found = 1;

		/* Check if we should stop the DMA transfer */
		stat = REG_RD(dma, port->regi_dmaout, rw_stat);
		if (stat.list_state == regk_dma_data_at_eol)
			stop_dma = 1;

		/* Clear IRQ */
		REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);

		if (!stop_dma) {
			/* The DMA has completed a descriptor, EOL was not
			 * encountered, so step relevant descriptor and
			 * datapointers forward. */
			int sent;
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
					  "in descr %p (ac: %p)\n",
					  port->out_buf_count, sent,
					  port->out_buf_count - sent,
					  port->catch_tr_descr,
					  port->active_tr_descr););
			port->out_buf_count -= sent;
			port->catch_tr_descr =
				phys_to_virt((int) port->catch_tr_descr->next);
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->buf);
		} else {
			int sent;
			int i = 0;
			/* EOL handler.
			 * Note that if an EOL was encountered during the irq
			 * locked section of sync_ser_write the DMA will be
			 * restarted and the eol flag will be cleared.
			 * The remaining descriptors will be traversed by
			 * the descriptor interrupts as usual.
			 */
			while (!port->catch_tr_descr->eol) {
				sent = port->catch_tr_descr->after -
					port->catch_tr_descr->buf;
				DEBUGOUTBUF(printk(KERN_DEBUG
					"traversing descr %p -%d (%d)\n",
					port->catch_tr_descr,
					sent,
					port->out_buf_count));
				port->out_buf_count -= sent;
				port->catch_tr_descr = phys_to_virt(
					(int)port->catch_tr_descr->next);
				i++;
				if (i >= NBR_OUT_DESCR) {
					/* TODO: Reset and recover */
					panic("sync_serial: missing eol");
				}
			}
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGOUTBUF(printk(KERN_DEBUG
				"eol at descr %p -%d (%d)\n",
				port->catch_tr_descr,
				sent,
				port->out_buf_count));

			port->out_buf_count -= sent;

			/* Update read pointer to first free byte, we
			 * may already be writing data there. */
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->after);
			if (port->out_rd_ptr > port->out_buffer +
					OUT_BUFFER_SIZE)
				port->out_rd_ptr = port->out_buffer;

			{
				reg_sser_rw_tr_cfg tr_cfg =
					REG_RD(sser, port->regi_sser, rw_tr_cfg);
				DEBUGTXINT(printk(KERN_DEBUG
					"tr_int DMA stop %d, set catch @ %p\n",
					port->out_buf_count,
					port->active_tr_descr));
				if (port->out_buf_count != 0)
					printk(KERN_CRIT "sync_ser: buffer not "
						"empty after eol.\n");
				port->catch_tr_descr = port->active_tr_descr;
				port->tr_running = 0;
				tr_cfg.tr_en = regk_sser_no;
				REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
			}
		}
		/* wake up the waiting process */
		wake_up_interruptible(&port->out_wait_q);
	}
	return IRQ_RETVAL(found);
} /* tr_interrupt */
static irqreturn_t rx_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
	int i;
	int found = 0;

	for (i = 0; i < NBR_PORTS; i++)
	{
		sync_port *port = &ports[i];

		if (!port->enabled || !port->use_dma)
			continue;

		masked = REG_RD(dma, port->regi_dmain, r_masked_intr);

		if (masked.data) /* Descriptor interrupt */
		{
			found = 1;
			while (REG_RD(dma, port->regi_dmain, rw_data) !=
			       virt_to_phys(port->next_rx_desc)) {
				DEBUGRXINT(printk(KERN_DEBUG "!"));
				if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
					int first_size = port->flip + port->in_buffer_size - port->writep;
					memcpy((char *)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
					memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf + first_size), port->inbufchunk - first_size);
					port->writep = port->flip + port->inbufchunk - first_size;
				} else {
					memcpy((char *)port->writep,
					       phys_to_virt((unsigned)port->next_rx_desc->buf),
					       port->inbufchunk);
					port->writep += port->inbufchunk;
					if (port->writep >= port->flip + port->in_buffer_size)
						port->writep = port->flip;
				}
				if (port->writep == port->readp)
				{
					/* receive buffer overrun */
					port->full = 1;
				}

				port->next_rx_desc->eol = 1;
				port->prev_rx_desc->eol = 0;
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 0);
				port->prev_rx_desc = port->next_rx_desc;
				port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 1);
				/* wake up the waiting process */
				wake_up_interruptible(&port->in_wait_q);
				DMA_CONTINUE(port->regi_dmain);
				REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
			}
		}
	}
	return IRQ_RETVAL(found);
} /* rx_interrupt */
#endif /* SYNC_SER_DMA */
#ifdef SYNC_SER_MANUAL
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
	int i;
	int found = 0;
	reg_sser_r_masked_intr masked;

	for (i = 0; i < NBR_PORTS; i++)
	{
		sync_port *port = &ports[i];

		if (!port->enabled || port->use_dma)
			continue;

		/* Which interrupts are active for this port? */
		masked = REG_RD(sser, port->regi_sser, r_masked_intr);
		if (masked.rdav)	/* Data received? */
		{
			reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
			reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
			found = 1;
			/* Read data */
			switch (rec_cfg.sample_size)
			{
			case 7:		/* 8 bit words */
				*port->writep++ = data.data & 0xff;
				break;
			case 11:	/* 12 bit words */
				*port->writep = (data.data & 0x0ff0) >> 4;
				*(port->writep + 1) = data.data & 0x0f;
				port->writep += 2;
				break;
			case 15:	/* 16 bit words */
				*(unsigned short *)port->writep = data.data;
				port->writep += 2;
				break;
			case 23:	/* 24 bit words */
				*(unsigned int *)port->writep = data.data;
				port->writep += 3;
				break;
			case 31:	/* 32 bit words */
				*(unsigned int *)port->writep = data.data;
				port->writep += 4;
				break;
			}

			if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
				port->writep = port->flip;
			if (port->writep == port->readp) {
				/* receive buffer overrun, discard oldest data
				 */
				port->readp++;
				if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
					port->readp = port->flip;
			}
			if (sync_data_avail(port) >= port->inbufchunk)
				wake_up_interruptible(&port->in_wait_q); /* Wake up application */
		}

		if (masked.trdy)	/* Transmitter ready? */
		{
			found = 1;
			if (port->out_buf_count > 0) /* More data to send */
				send_word(port);
			else /* transmission finished */
			{
				reg_sser_rw_intr_mask intr_mask;
				intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
				intr_mask.trdy = 0;
				REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
				wake_up_interruptible(&port->out_wait_q); /* Wake up application */
			}
		}
	}
	return IRQ_RETVAL(found);
}
#endif

module_init(etrax_sync_serial_init);