/*
 * Driver for CSR SiRFprimaII onboard UARTs.
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
9 #include <linux/module.h>
10 #include <linux/ioport.h>
11 #include <linux/platform_device.h>
12 #include <linux/init.h>
13 #include <linux/sysrq.h>
14 #include <linux/console.h>
15 #include <linux/tty.h>
16 #include <linux/tty_flip.h>
17 #include <linux/serial_core.h>
18 #include <linux/serial.h>
19 #include <linux/clk.h>
21 #include <linux/slab.h>
23 #include <linux/of_gpio.h>
24 #include <linux/dmaengine.h>
25 #include <linux/dma-direction.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/sirfsoc_dma.h>
29 #include <asm/mach/irq.h>
31 #include "sirfsoc_uart.h"
34 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port
*sirfport
, int count
);
36 sirfsoc_uart_pio_rx_chars(struct uart_port
*port
, unsigned int max_rx_count
);
37 static struct uart_driver sirfsoc_uart_drv
;
39 static void sirfsoc_uart_tx_dma_complete_callback(void *param
);
40 static void sirfsoc_uart_start_next_rx_dma(struct uart_port
*port
);
41 static void sirfsoc_uart_rx_dma_complete_callback(void *param
);
42 static const struct sirfsoc_baudrate_to_regv baudrate_to_regv
[] = {
63 static struct sirfsoc_uart_port sirfsoc_uart_ports
[SIRFSOC_UART_NR
] = {
67 .flags
= UPF_BOOT_AUTOCONF
,
74 .flags
= UPF_BOOT_AUTOCONF
,
81 .flags
= UPF_BOOT_AUTOCONF
,
88 .flags
= UPF_BOOT_AUTOCONF
,
95 .flags
= UPF_BOOT_AUTOCONF
,
102 .flags
= UPF_BOOT_AUTOCONF
,
/*
 * to_sirfport() - recover the driver-private sirfsoc_uart_port that
 * embeds the generic uart_port handed to us by the serial core.
 * NOTE(review): garbled extraction — the brace-only lines (originals
 * 109 and 111 per the embedded numbering) were dropped.
 */
108 static inline struct sirfsoc_uart_port
*to_sirfport(struct uart_port
*port
)
110 return container_of(port
, struct sirfsoc_uart_port
, port
);
/*
 * sirfsoc_uart_tx_empty() - uart_ops .tx_empty hook: read the TX FIFO
 * status register and report TIOCSER_TEMT when this line's "FIFO empty"
 * bit is set, 0 otherwise.
 * NOTE(review): garbled extraction — the declaration of 'reg'
 * (original lines 114-115) and the closing brace were dropped here;
 * confirm against the upstream driver.
 */
113 static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port
*port
)
116 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
117 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
118 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
/* Snapshot the TX FIFO status, then test the per-line empty bit. */
119 reg
= rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
);
121 return (reg
& ufifo_st
->ff_empty(port
->line
)) ? TIOCSER_TEMT
: 0;
/*
 * sirfsoc_uart_get_mctrl() - uart_ops .get_mctrl hook. DCD and DSR are
 * always reported asserted; CTS is additionally reported asserted
 * unless hardware flow control + modem status are enabled and the CTS
 * input (AFC status register on a real UART, cts_gpio on a USP-based
 * port) shows it deasserted.
 * NOTE(review): garbled extraction — several original lines (129,
 * 133-136, 138-142: braces, early returns, else arms) are missing, so
 * the exact control flow between the two return statements below must
 * be verified against the upstream driver.
 */
124 static unsigned int sirfsoc_uart_get_mctrl(struct uart_port
*port
)
126 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
127 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
/* No HW flow control or modem-status reporting: CTS is not consulted. */
128 if (!sirfport
->hw_flow_ctrl
|| !sirfport
->ms_enabled
)
130 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
/* Real UART: CTS state lives in the AFC control/status register. */
131 if (!(rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) &
132 SIRFUART_AFC_CTS_STATUS
))
/* USP port: CTS state comes from a GPIO (active low). */
137 if (!gpio_get_value(sirfport
->cts_gpio
))
143 return TIOCM_CAR
| TIOCM_DSR
;
145 return TIOCM_CAR
| TIOCM_DSR
| TIOCM_CTS
;
/*
 * sirfsoc_uart_set_mctrl() - uart_ops .set_mctrl hook: drive RTS.
 * When hardware flow control and modem-status reporting are enabled, a
 * real UART has its AFC control register rewritten (RX threshold value
 * when RTS is asserted, 0 otherwise); a USP-based port toggles
 * rts_gpio instead.
 * NOTE(review): garbled extraction — original lines 155, 157, 160,
 * 162-163, 165 and 167-169 (braces, the early return, the else arms,
 * and the use of 'current_val') were dropped; verify against the
 * upstream driver before relying on this text.
 */
148 static void sirfsoc_uart_set_mctrl(struct uart_port
*port
, unsigned int mctrl
)
150 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
151 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
/* Requested RTS state, mapped to the AFC register value to program. */
152 unsigned int assert = mctrl
& TIOCM_RTS
;
153 unsigned int val
= assert ? SIRFUART_AFC_CTRL_RX_THD
: 0x0;
154 unsigned int current_val
;
156 if (!sirfport
->hw_flow_ctrl
|| !sirfport
->ms_enabled
)
158 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
/* Preserve the upper AFC bits, replace the low byte with 'val'. */
159 current_val
= rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) & ~0xFF;
161 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
, val
);
/* USP port: RTS is a plain GPIO line. */
164 gpio_set_value(sirfport
->rts_gpio
, 1);
166 gpio_set_value(sirfport
->rts_gpio
, 0);
170 static void sirfsoc_uart_stop_tx(struct uart_port
*port
)
172 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
173 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
174 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
176 if (IS_DMA_CHAN_VALID(sirfport
->tx_dma_no
)) {
177 if (sirfport
->tx_dma_state
== TX_DMA_RUNNING
) {
178 dmaengine_pause(sirfport
->tx_dma_chan
);
179 sirfport
->tx_dma_state
= TX_DMA_PAUSE
;
181 if (!sirfport
->is_marco
)
182 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
183 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
184 ~uint_en
->sirfsoc_txfifo_empty_en
);
186 wr_regl(port
, SIRFUART_INT_EN_CLR
,
187 uint_en
->sirfsoc_txfifo_empty_en
);
190 if (!sirfport
->is_marco
)
191 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
192 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
193 ~uint_en
->sirfsoc_txfifo_empty_en
);
195 wr_regl(port
, SIRFUART_INT_EN_CLR
,
196 uint_en
->sirfsoc_txfifo_empty_en
);
200 static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port
*sirfport
)
202 struct uart_port
*port
= &sirfport
->port
;
203 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
204 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
205 struct circ_buf
*xmit
= &port
->state
->xmit
;
206 unsigned long tran_size
;
207 unsigned long tran_start
;
208 unsigned long pio_tx_size
;
210 tran_size
= CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
, UART_XMIT_SIZE
);
211 tran_start
= (unsigned long)(xmit
->buf
+ xmit
->tail
);
212 if (uart_circ_empty(xmit
) || uart_tx_stopped(port
) ||
215 if (sirfport
->tx_dma_state
== TX_DMA_PAUSE
) {
216 dmaengine_resume(sirfport
->tx_dma_chan
);
219 if (sirfport
->tx_dma_state
== TX_DMA_RUNNING
)
221 if (!sirfport
->is_marco
)
222 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
223 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
224 ~(uint_en
->sirfsoc_txfifo_empty_en
));
226 wr_regl(port
, SIRFUART_INT_EN_CLR
,
227 uint_en
->sirfsoc_txfifo_empty_en
);
229 * DMA requires buffer address and buffer length are both aligned with
230 * 4 bytes, so we use PIO for
231 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
232 * bytes, and move to DMA for the left part aligned with 4bytes
233 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
234 * part first, move to PIO for the left 1~3 bytes
236 if (tran_size
< 4 || BYTES_TO_ALIGN(tran_start
)) {
237 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
238 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
239 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
)|
241 if (BYTES_TO_ALIGN(tran_start
)) {
242 pio_tx_size
= sirfsoc_uart_pio_tx_chars(sirfport
,
243 BYTES_TO_ALIGN(tran_start
));
244 tran_size
-= pio_tx_size
;
247 sirfsoc_uart_pio_tx_chars(sirfport
, tran_size
);
248 if (!sirfport
->is_marco
)
249 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
250 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)|
251 uint_en
->sirfsoc_txfifo_empty_en
);
253 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
254 uint_en
->sirfsoc_txfifo_empty_en
);
255 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
257 /* tx transfer mode switch into dma mode */
258 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
259 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
260 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
)&
262 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
265 sirfport
->tx_dma_addr
= dma_map_single(port
->dev
,
266 xmit
->buf
+ xmit
->tail
,
267 tran_size
, DMA_TO_DEVICE
);
268 sirfport
->tx_dma_desc
= dmaengine_prep_slave_single(
269 sirfport
->tx_dma_chan
, sirfport
->tx_dma_addr
,
270 tran_size
, DMA_MEM_TO_DEV
, DMA_PREP_INTERRUPT
);
271 if (!sirfport
->tx_dma_desc
) {
272 dev_err(port
->dev
, "DMA prep slave single fail\n");
275 sirfport
->tx_dma_desc
->callback
=
276 sirfsoc_uart_tx_dma_complete_callback
;
277 sirfport
->tx_dma_desc
->callback_param
= (void *)sirfport
;
278 sirfport
->transfer_size
= tran_size
;
280 dmaengine_submit(sirfport
->tx_dma_desc
);
281 dma_async_issue_pending(sirfport
->tx_dma_chan
);
282 sirfport
->tx_dma_state
= TX_DMA_RUNNING
;
286 static void sirfsoc_uart_start_tx(struct uart_port
*port
)
288 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
289 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
290 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
291 if (IS_DMA_CHAN_VALID(sirfport
->tx_dma_no
))
292 sirfsoc_uart_tx_with_dma(sirfport
);
294 sirfsoc_uart_pio_tx_chars(sirfport
, 1);
295 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
296 if (!sirfport
->is_marco
)
297 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
298 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)|
299 uint_en
->sirfsoc_txfifo_empty_en
);
301 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
302 uint_en
->sirfsoc_txfifo_empty_en
);
306 static void sirfsoc_uart_stop_rx(struct uart_port
*port
)
308 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
309 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
310 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
312 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
313 if (IS_DMA_CHAN_VALID(sirfport
->rx_dma_no
)) {
314 if (!sirfport
->is_marco
)
315 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
316 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
317 ~(SIRFUART_RX_DMA_INT_EN(port
, uint_en
) |
318 uint_en
->sirfsoc_rx_done_en
));
320 wr_regl(port
, SIRFUART_INT_EN_CLR
,
321 SIRFUART_RX_DMA_INT_EN(port
, uint_en
)|
322 uint_en
->sirfsoc_rx_done_en
);
323 dmaengine_terminate_all(sirfport
->rx_dma_chan
);
325 if (!sirfport
->is_marco
)
326 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
327 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
328 ~(SIRFUART_RX_IO_INT_EN(port
, uint_en
)));
330 wr_regl(port
, SIRFUART_INT_EN_CLR
,
331 SIRFUART_RX_IO_INT_EN(port
, uint_en
));
335 static void sirfsoc_uart_disable_ms(struct uart_port
*port
)
337 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
338 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
339 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
341 if (!sirfport
->hw_flow_ctrl
)
343 sirfport
->ms_enabled
= false;
344 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
345 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
,
346 rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) & ~0x3FF);
347 if (!sirfport
->is_marco
)
348 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
349 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
350 ~uint_en
->sirfsoc_cts_en
);
352 wr_regl(port
, SIRFUART_INT_EN_CLR
,
353 uint_en
->sirfsoc_cts_en
);
355 disable_irq(gpio_to_irq(sirfport
->cts_gpio
));
/*
 * sirfsoc_uart_usp_cts_handler() - GPIO CTS edge interrupt handler for
 * USP-based ports: under the port lock, forward the (inverted)
 * cts_gpio level to the serial core via uart_handle_cts_change() when
 * the GPIO is valid and modem-status reporting is enabled.
 * NOTE(review): garbled extraction — the braces and the final
 * 'return IRQ_HANDLED;' (originals 359, 367-368) were dropped.
 */
358 static irqreturn_t
sirfsoc_uart_usp_cts_handler(int irq
, void *dev_id
)
360 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)dev_id
;
361 struct uart_port
*port
= &sirfport
->port
;
362 spin_lock(&port
->lock
);
363 if (gpio_is_valid(sirfport
->cts_gpio
) && sirfport
->ms_enabled
)
364 uart_handle_cts_change(port
,
365 !gpio_get_value(sirfport
->cts_gpio
));
366 spin_unlock(&port
->lock
);
370 static void sirfsoc_uart_enable_ms(struct uart_port
*port
)
372 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
373 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
374 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
376 if (!sirfport
->hw_flow_ctrl
)
378 sirfport
->ms_enabled
= true;
379 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
380 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
,
381 rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) |
382 SIRFUART_AFC_TX_EN
| SIRFUART_AFC_RX_EN
);
383 if (!sirfport
->is_marco
)
384 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
385 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
386 | uint_en
->sirfsoc_cts_en
);
388 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
389 uint_en
->sirfsoc_cts_en
);
391 enable_irq(gpio_to_irq(sirfport
->cts_gpio
));
/*
 * sirfsoc_uart_break_ctl() - uart_ops .break_ctl hook: on a real UART,
 * set or clear the SIRFUART_SET_BREAK bit in the line-control register
 * according to break_state, then write the register back. (No break
 * handling is visible here for USP-based ports.)
 * NOTE(review): garbled extraction — the if/else lines selecting
 * between the |= and &= arms (originals 400, 402) and the surrounding
 * braces/locking, if any, were dropped.
 */
394 static void sirfsoc_uart_break_ctl(struct uart_port
*port
, int break_state
)
396 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
397 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
398 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
399 unsigned long ulcon
= rd_regl(port
, ureg
->sirfsoc_line_ctrl
);
/* Assert break ... */
401 ulcon
|= SIRFUART_SET_BREAK
;
/* ... or deassert it, per break_state (selector lines lost). */
403 ulcon
&= ~SIRFUART_SET_BREAK
;
404 wr_regl(port
, ureg
->sirfsoc_line_ctrl
, ulcon
);
409 sirfsoc_uart_pio_rx_chars(struct uart_port
*port
, unsigned int max_rx_count
)
411 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
412 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
413 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
414 unsigned int ch
, rx_count
= 0;
415 struct tty_struct
*tty
;
416 tty
= tty_port_tty_get(&port
->state
->port
);
419 while (!(rd_regl(port
, ureg
->sirfsoc_rx_fifo_status
) &
420 ufifo_st
->ff_empty(port
->line
))) {
421 ch
= rd_regl(port
, ureg
->sirfsoc_rx_fifo_data
) |
423 if (unlikely(uart_handle_sysrq_char(port
, ch
)))
425 uart_insert_char(port
, 0, 0, ch
, TTY_NORMAL
);
427 if (rx_count
>= max_rx_count
)
431 sirfport
->rx_io_count
+= rx_count
;
432 port
->icount
.rx
+= rx_count
;
438 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port
*sirfport
, int count
)
440 struct uart_port
*port
= &sirfport
->port
;
441 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
442 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
443 struct circ_buf
*xmit
= &port
->state
->xmit
;
444 unsigned int num_tx
= 0;
445 while (!uart_circ_empty(xmit
) &&
446 !(rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
447 ufifo_st
->ff_full(port
->line
)) &&
449 wr_regl(port
, ureg
->sirfsoc_tx_fifo_data
,
450 xmit
->buf
[xmit
->tail
]);
451 xmit
->tail
= (xmit
->tail
+ 1) & (UART_XMIT_SIZE
- 1);
455 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
456 uart_write_wakeup(port
);
460 static void sirfsoc_uart_tx_dma_complete_callback(void *param
)
462 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
463 struct uart_port
*port
= &sirfport
->port
;
464 struct circ_buf
*xmit
= &port
->state
->xmit
;
467 spin_lock_irqsave(&port
->lock
, flags
);
468 xmit
->tail
= (xmit
->tail
+ sirfport
->transfer_size
) &
469 (UART_XMIT_SIZE
- 1);
470 port
->icount
.tx
+= sirfport
->transfer_size
;
471 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
472 uart_write_wakeup(port
);
473 if (sirfport
->tx_dma_addr
)
474 dma_unmap_single(port
->dev
, sirfport
->tx_dma_addr
,
475 sirfport
->transfer_size
, DMA_TO_DEVICE
);
476 sirfport
->tx_dma_state
= TX_DMA_IDLE
;
477 sirfsoc_uart_tx_with_dma(sirfport
);
478 spin_unlock_irqrestore(&port
->lock
, flags
);
481 static void sirfsoc_uart_insert_rx_buf_to_tty(
482 struct sirfsoc_uart_port
*sirfport
, int count
)
484 struct uart_port
*port
= &sirfport
->port
;
485 struct tty_port
*tport
= &port
->state
->port
;
488 inserted
= tty_insert_flip_string(tport
,
489 sirfport
->rx_dma_items
[sirfport
->rx_completed
].xmit
.buf
, count
);
490 port
->icount
.rx
+= inserted
;
493 static void sirfsoc_rx_submit_one_dma_desc(struct uart_port
*port
, int index
)
495 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
497 sirfport
->rx_dma_items
[index
].xmit
.tail
=
498 sirfport
->rx_dma_items
[index
].xmit
.head
= 0;
499 sirfport
->rx_dma_items
[index
].desc
=
500 dmaengine_prep_slave_single(sirfport
->rx_dma_chan
,
501 sirfport
->rx_dma_items
[index
].dma_addr
, SIRFSOC_RX_DMA_BUF_SIZE
,
502 DMA_DEV_TO_MEM
, DMA_PREP_INTERRUPT
);
503 if (!sirfport
->rx_dma_items
[index
].desc
) {
504 dev_err(port
->dev
, "DMA slave single fail\n");
507 sirfport
->rx_dma_items
[index
].desc
->callback
=
508 sirfsoc_uart_rx_dma_complete_callback
;
509 sirfport
->rx_dma_items
[index
].desc
->callback_param
= sirfport
;
510 sirfport
->rx_dma_items
[index
].cookie
=
511 dmaengine_submit(sirfport
->rx_dma_items
[index
].desc
);
512 dma_async_issue_pending(sirfport
->rx_dma_chan
);
515 static void sirfsoc_rx_tmo_process_tl(unsigned long param
)
517 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
518 struct uart_port
*port
= &sirfport
->port
;
519 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
520 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
521 struct sirfsoc_int_status
*uint_st
= &sirfport
->uart_reg
->uart_int_st
;
525 spin_lock_irqsave(&port
->lock
, flags
);
526 while (sirfport
->rx_completed
!= sirfport
->rx_issued
) {
527 sirfsoc_uart_insert_rx_buf_to_tty(sirfport
,
528 SIRFSOC_RX_DMA_BUF_SIZE
);
529 sirfsoc_rx_submit_one_dma_desc(port
, sirfport
->rx_completed
++);
530 sirfport
->rx_completed
%= SIRFSOC_RX_LOOP_BUF_CNT
;
532 count
= CIRC_CNT(sirfport
->rx_dma_items
[sirfport
->rx_issued
].xmit
.head
,
533 sirfport
->rx_dma_items
[sirfport
->rx_issued
].xmit
.tail
,
534 SIRFSOC_RX_DMA_BUF_SIZE
);
536 sirfsoc_uart_insert_rx_buf_to_tty(sirfport
, count
);
537 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
538 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) |
540 sirfsoc_uart_pio_rx_chars(port
, 4 - sirfport
->rx_io_count
);
541 if (sirfport
->rx_io_count
== 4) {
542 sirfport
->rx_io_count
= 0;
543 wr_regl(port
, ureg
->sirfsoc_int_st_reg
,
544 uint_st
->sirfsoc_rx_done
);
545 if (!sirfport
->is_marco
)
546 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
547 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
548 ~(uint_en
->sirfsoc_rx_done_en
));
550 wr_regl(port
, SIRFUART_INT_EN_CLR
,
551 uint_en
->sirfsoc_rx_done_en
);
552 sirfsoc_uart_start_next_rx_dma(port
);
554 wr_regl(port
, ureg
->sirfsoc_int_st_reg
,
555 uint_st
->sirfsoc_rx_done
);
556 if (!sirfport
->is_marco
)
557 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
558 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
559 (uint_en
->sirfsoc_rx_done_en
));
561 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
562 uint_en
->sirfsoc_rx_done_en
);
564 spin_unlock_irqrestore(&port
->lock
, flags
);
565 tty_flip_buffer_push(&port
->state
->port
);
568 static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port
*sirfport
)
570 struct uart_port
*port
= &sirfport
->port
;
571 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
572 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
573 struct dma_tx_state tx_state
;
574 dmaengine_tx_status(sirfport
->rx_dma_chan
,
575 sirfport
->rx_dma_items
[sirfport
->rx_issued
].cookie
, &tx_state
);
576 dmaengine_terminate_all(sirfport
->rx_dma_chan
);
577 sirfport
->rx_dma_items
[sirfport
->rx_issued
].xmit
.head
=
578 SIRFSOC_RX_DMA_BUF_SIZE
- tx_state
.residue
;
579 if (!sirfport
->is_marco
)
580 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
581 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
582 ~(uint_en
->sirfsoc_rx_timeout_en
));
584 wr_regl(port
, SIRFUART_INT_EN_CLR
,
585 uint_en
->sirfsoc_rx_timeout_en
);
586 tasklet_schedule(&sirfport
->rx_tmo_process_tasklet
);
589 static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port
*sirfport
)
591 struct uart_port
*port
= &sirfport
->port
;
592 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
593 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
594 struct sirfsoc_int_status
*uint_st
= &sirfport
->uart_reg
->uart_int_st
;
596 sirfsoc_uart_pio_rx_chars(port
, 4 - sirfport
->rx_io_count
);
597 if (sirfport
->rx_io_count
== 4) {
598 sirfport
->rx_io_count
= 0;
599 if (!sirfport
->is_marco
)
600 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
601 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
602 ~(uint_en
->sirfsoc_rx_done_en
));
604 wr_regl(port
, SIRFUART_INT_EN_CLR
,
605 uint_en
->sirfsoc_rx_done_en
);
606 wr_regl(port
, ureg
->sirfsoc_int_st_reg
,
607 uint_st
->sirfsoc_rx_timeout
);
608 sirfsoc_uart_start_next_rx_dma(port
);
612 static irqreturn_t
sirfsoc_uart_isr(int irq
, void *dev_id
)
614 unsigned long intr_status
;
615 unsigned long cts_status
;
616 unsigned long flag
= TTY_NORMAL
;
617 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)dev_id
;
618 struct uart_port
*port
= &sirfport
->port
;
619 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
620 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
621 struct sirfsoc_int_status
*uint_st
= &sirfport
->uart_reg
->uart_int_st
;
622 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
623 struct uart_state
*state
= port
->state
;
624 struct circ_buf
*xmit
= &port
->state
->xmit
;
625 spin_lock(&port
->lock
);
626 intr_status
= rd_regl(port
, ureg
->sirfsoc_int_st_reg
);
627 wr_regl(port
, ureg
->sirfsoc_int_st_reg
, intr_status
);
628 intr_status
&= rd_regl(port
, ureg
->sirfsoc_int_en_reg
);
629 if (unlikely(intr_status
& (SIRFUART_ERR_INT_STAT(port
, uint_st
)))) {
630 if (intr_status
& uint_st
->sirfsoc_rxd_brk
) {
632 if (uart_handle_break(port
))
635 if (intr_status
& uint_st
->sirfsoc_rx_oflow
)
636 port
->icount
.overrun
++;
637 if (intr_status
& uint_st
->sirfsoc_frm_err
) {
638 port
->icount
.frame
++;
641 if (intr_status
& uint_st
->sirfsoc_parity_err
)
643 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
644 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
645 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_START
);
646 intr_status
&= port
->read_status_mask
;
647 uart_insert_char(port
, intr_status
,
648 uint_en
->sirfsoc_rx_oflow_en
, 0, flag
);
651 if ((sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) &&
652 (intr_status
& SIRFUART_CTS_INT_ST(uint_st
)) &&
653 !sirfport
->tx_dma_state
) {
654 cts_status
= rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) &
655 SIRFUART_AFC_CTS_STATUS
;
660 uart_handle_cts_change(port
, cts_status
);
661 wake_up_interruptible(&state
->port
.delta_msr_wait
);
663 if (IS_DMA_CHAN_VALID(sirfport
->rx_dma_no
)) {
664 if (intr_status
& uint_st
->sirfsoc_rx_timeout
)
665 sirfsoc_uart_handle_rx_tmo(sirfport
);
666 if (intr_status
& uint_st
->sirfsoc_rx_done
)
667 sirfsoc_uart_handle_rx_done(sirfport
);
669 if (intr_status
& SIRFUART_RX_IO_INT_ST(uint_st
))
670 sirfsoc_uart_pio_rx_chars(port
,
671 SIRFSOC_UART_IO_RX_MAX_CNT
);
673 spin_unlock(&port
->lock
);
674 tty_flip_buffer_push(&state
->port
);
675 spin_lock(&port
->lock
);
676 if (intr_status
& uint_st
->sirfsoc_txfifo_empty
) {
677 if (IS_DMA_CHAN_VALID(sirfport
->tx_dma_no
))
678 sirfsoc_uart_tx_with_dma(sirfport
);
680 if (uart_circ_empty(xmit
) || uart_tx_stopped(port
)) {
681 spin_unlock(&port
->lock
);
684 sirfsoc_uart_pio_tx_chars(sirfport
,
685 SIRFSOC_UART_IO_TX_REASONABLE_CNT
);
686 if ((uart_circ_empty(xmit
)) &&
687 (rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
688 ufifo_st
->ff_empty(port
->line
)))
689 sirfsoc_uart_stop_tx(port
);
693 spin_unlock(&port
->lock
);
698 static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param
)
700 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
701 struct uart_port
*port
= &sirfport
->port
;
703 spin_lock_irqsave(&port
->lock
, flags
);
704 while (sirfport
->rx_completed
!= sirfport
->rx_issued
) {
705 sirfsoc_uart_insert_rx_buf_to_tty(sirfport
,
706 SIRFSOC_RX_DMA_BUF_SIZE
);
707 sirfsoc_rx_submit_one_dma_desc(port
, sirfport
->rx_completed
++);
708 sirfport
->rx_completed
%= SIRFSOC_RX_LOOP_BUF_CNT
;
710 spin_unlock_irqrestore(&port
->lock
, flags
);
711 tty_flip_buffer_push(&port
->state
->port
);
/*
 * sirfsoc_uart_rx_dma_complete_callback() - dmaengine completion
 * callback for one RX loop buffer: under the port lock, advance
 * rx_issued modulo SIRFSOC_RX_LOOP_BUF_CNT and schedule the
 * rx_dma_complete tasklet, which does the actual tty push outside
 * hard-IRQ context.
 * NOTE(review): garbled extraction — the 'unsigned long flags;'
 * declaration (around original line 717) and braces were dropped.
 */
714 static void sirfsoc_uart_rx_dma_complete_callback(void *param
)
716 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
719 spin_lock_irqsave(&sirfport
->port
.lock
, flags
);
720 sirfport
->rx_issued
++;
721 sirfport
->rx_issued
%= SIRFSOC_RX_LOOP_BUF_CNT
;
722 tasklet_schedule(&sirfport
->rx_dma_complete_tasklet
);
723 spin_unlock_irqrestore(&sirfport
->port
.lock
, flags
);
726 /* submit rx dma task into dmaengine */
727 static void sirfsoc_uart_start_next_rx_dma(struct uart_port
*port
)
729 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
730 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
731 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
733 sirfport
->rx_io_count
= 0;
734 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
735 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) &
737 for (i
= 0; i
< SIRFSOC_RX_LOOP_BUF_CNT
; i
++)
738 sirfsoc_rx_submit_one_dma_desc(port
, i
);
739 sirfport
->rx_completed
= sirfport
->rx_issued
= 0;
740 if (!sirfport
->is_marco
)
741 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
742 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
743 SIRFUART_RX_DMA_INT_EN(port
, uint_en
));
745 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
746 SIRFUART_RX_DMA_INT_EN(port
, uint_en
));
749 static void sirfsoc_uart_start_rx(struct uart_port
*port
)
751 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
752 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
753 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
755 sirfport
->rx_io_count
= 0;
756 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
757 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
758 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_START
);
759 if (IS_DMA_CHAN_VALID(sirfport
->rx_dma_no
))
760 sirfsoc_uart_start_next_rx_dma(port
);
762 if (!sirfport
->is_marco
)
763 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
764 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
765 SIRFUART_RX_IO_INT_EN(port
, uint_en
));
767 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
768 SIRFUART_RX_IO_INT_EN(port
, uint_en
));
/*
 * sirfsoc_usp_calc_sample_div() - for a USP-based port, scan sample_div
 * over [SIRF_MIN_SAMPLE_DIV, SIRF_MAX_SAMPLE_DIV] and pick the value
 * minimising the error between ioclk_rate and the nearest multiple of
 * (set_rate * sample_div); the best sample divisor is stored through
 * *sample_reg and the matching rounded ioclk divisor is kept in
 * 'ioclk_div'.
 * NOTE(review): garbled extraction — the return-type/'static' line,
 * the ioclk_div bounds handling after the SIRF_IOCLK_DIV_MAX check,
 * and the final return (originals 772, 775, 786, 792, 795-799) were
 * dropped; confirm the exact contract against the upstream driver.
 */
773 sirfsoc_usp_calc_sample_div(unsigned long set_rate
,
774 unsigned long ioclk_rate
, unsigned long *sample_reg
)
776 unsigned long min_delta
= ~0UL;
777 unsigned short sample_div
;
778 unsigned long ioclk_div
= 0;
779 unsigned long temp_delta
;
781 for (sample_div
= SIRF_MIN_SAMPLE_DIV
;
782 sample_div
<= SIRF_MAX_SAMPLE_DIV
; sample_div
++) {
/* Rounding error of ioclk_rate against (set_rate * sample_div). */
783 temp_delta
= ioclk_rate
-
784 (ioclk_rate
+ (set_rate
* sample_div
) / 2)
785 / (set_rate
* sample_div
) * set_rate
* sample_div
;
787 temp_delta
= (temp_delta
> 0) ? temp_delta
: -temp_delta
;
788 if (temp_delta
< min_delta
) {
/* Candidate ioclk divisor, rounded to nearest. */
789 ioclk_div
= (2 * ioclk_rate
/
790 (set_rate
* sample_div
) + 1) / 2 - 1;
791 if (ioclk_div
> SIRF_IOCLK_DIV_MAX
)
793 min_delta
= temp_delta
;
794 *sample_reg
= sample_div
;
803 sirfsoc_uart_calc_sample_div(unsigned long baud_rate
,
804 unsigned long ioclk_rate
, unsigned long *set_baud
)
806 unsigned long min_delta
= ~0UL;
807 unsigned short sample_div
;
808 unsigned int regv
= 0;
809 unsigned long ioclk_div
;
810 unsigned long baud_tmp
;
813 for (sample_div
= SIRF_MIN_SAMPLE_DIV
;
814 sample_div
<= SIRF_MAX_SAMPLE_DIV
; sample_div
++) {
815 ioclk_div
= (ioclk_rate
/ (baud_rate
* (sample_div
+ 1))) - 1;
816 if (ioclk_div
> SIRF_IOCLK_DIV_MAX
)
818 baud_tmp
= ioclk_rate
/ ((ioclk_div
+ 1) * (sample_div
+ 1));
819 temp_delta
= baud_tmp
- baud_rate
;
820 temp_delta
= (temp_delta
> 0) ? temp_delta
: -temp_delta
;
821 if (temp_delta
< min_delta
) {
822 regv
= regv
& (~SIRF_IOCLK_DIV_MASK
);
823 regv
= regv
| ioclk_div
;
824 regv
= regv
& (~SIRF_SAMPLE_DIV_MASK
);
825 regv
= regv
| (sample_div
<< SIRF_SAMPLE_DIV_SHIFT
);
826 min_delta
= temp_delta
;
827 *set_baud
= baud_tmp
;
833 static void sirfsoc_uart_set_termios(struct uart_port
*port
,
834 struct ktermios
*termios
,
835 struct ktermios
*old
)
837 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
838 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
839 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
840 unsigned long config_reg
= 0;
841 unsigned long baud_rate
;
842 unsigned long set_baud
;
845 unsigned int clk_div_reg
= 0;
846 unsigned long txfifo_op_reg
, ioclk_rate
;
847 unsigned long rx_time_out
;
849 u32 data_bit_len
, stop_bit_len
, len_val
;
850 unsigned long sample_div_reg
= 0xf;
851 ioclk_rate
= port
->uartclk
;
853 switch (termios
->c_cflag
& CSIZE
) {
857 config_reg
|= SIRFUART_DATA_BIT_LEN_8
;
861 config_reg
|= SIRFUART_DATA_BIT_LEN_7
;
865 config_reg
|= SIRFUART_DATA_BIT_LEN_6
;
869 config_reg
|= SIRFUART_DATA_BIT_LEN_5
;
872 if (termios
->c_cflag
& CSTOPB
) {
873 config_reg
|= SIRFUART_STOP_BIT_LEN_2
;
878 spin_lock_irqsave(&port
->lock
, flags
);
879 port
->read_status_mask
= uint_en
->sirfsoc_rx_oflow_en
;
880 port
->ignore_status_mask
= 0;
881 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
882 if (termios
->c_iflag
& INPCK
)
883 port
->read_status_mask
|= uint_en
->sirfsoc_frm_err_en
|
884 uint_en
->sirfsoc_parity_err_en
;
886 if (termios
->c_iflag
& INPCK
)
887 port
->read_status_mask
|= uint_en
->sirfsoc_frm_err_en
;
889 if (termios
->c_iflag
& (IGNBRK
| BRKINT
| PARMRK
))
890 port
->read_status_mask
|= uint_en
->sirfsoc_rxd_brk_en
;
891 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
892 if (termios
->c_iflag
& IGNPAR
)
893 port
->ignore_status_mask
|=
894 uint_en
->sirfsoc_frm_err_en
|
895 uint_en
->sirfsoc_parity_err_en
;
896 if (termios
->c_cflag
& PARENB
) {
897 if (termios
->c_cflag
& CMSPAR
) {
898 if (termios
->c_cflag
& PARODD
)
899 config_reg
|= SIRFUART_STICK_BIT_MARK
;
901 config_reg
|= SIRFUART_STICK_BIT_SPACE
;
902 } else if (termios
->c_cflag
& PARODD
) {
903 config_reg
|= SIRFUART_STICK_BIT_ODD
;
905 config_reg
|= SIRFUART_STICK_BIT_EVEN
;
909 if (termios
->c_iflag
& IGNPAR
)
910 port
->ignore_status_mask
|=
911 uint_en
->sirfsoc_frm_err_en
;
912 if (termios
->c_cflag
& PARENB
)
914 "USP-UART not support parity err\n");
916 if (termios
->c_iflag
& IGNBRK
) {
917 port
->ignore_status_mask
|=
918 uint_en
->sirfsoc_rxd_brk_en
;
919 if (termios
->c_iflag
& IGNPAR
)
920 port
->ignore_status_mask
|=
921 uint_en
->sirfsoc_rx_oflow_en
;
923 if ((termios
->c_cflag
& CREAD
) == 0)
924 port
->ignore_status_mask
|= SIRFUART_DUMMY_READ
;
925 /* Hardware Flow Control Settings */
926 if (UART_ENABLE_MS(port
, termios
->c_cflag
)) {
927 if (!sirfport
->ms_enabled
)
928 sirfsoc_uart_enable_ms(port
);
930 if (sirfport
->ms_enabled
)
931 sirfsoc_uart_disable_ms(port
);
933 baud_rate
= uart_get_baud_rate(port
, termios
, old
, 0, 4000000);
934 if (ioclk_rate
== 150000000) {
935 for (ic
= 0; ic
< SIRF_BAUD_RATE_SUPPORT_NR
; ic
++)
936 if (baud_rate
== baudrate_to_regv
[ic
].baud_rate
)
937 clk_div_reg
= baudrate_to_regv
[ic
].reg_val
;
939 set_baud
= baud_rate
;
940 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
941 if (unlikely(clk_div_reg
== 0))
942 clk_div_reg
= sirfsoc_uart_calc_sample_div(baud_rate
,
943 ioclk_rate
, &set_baud
);
944 wr_regl(port
, ureg
->sirfsoc_divisor
, clk_div_reg
);
946 clk_div_reg
= sirfsoc_usp_calc_sample_div(baud_rate
,
947 ioclk_rate
, &sample_div_reg
);
949 set_baud
= ((ioclk_rate
/ (clk_div_reg
+1) - 1) /
950 (sample_div_reg
+ 1));
951 /* setting usp mode 2 */
952 len_val
= ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET
) |
953 (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET
));
954 len_val
|= ((clk_div_reg
& SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK
)
955 << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET
);
956 wr_regl(port
, ureg
->sirfsoc_mode2
, len_val
);
958 if (tty_termios_baud_rate(termios
))
959 tty_termios_encode_baud_rate(termios
, set_baud
, set_baud
);
960 /* set receive timeout && data bits len */
961 rx_time_out
= SIRFSOC_UART_RX_TIMEOUT(set_baud
, 20000);
962 rx_time_out
= SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out
);
963 txfifo_op_reg
= rd_regl(port
, ureg
->sirfsoc_tx_fifo_op
);
964 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_STOP
);
965 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
,
966 (txfifo_op_reg
& ~SIRFUART_FIFO_START
));
967 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
968 config_reg
|= SIRFUART_RECV_TIMEOUT(port
, rx_time_out
);
969 wr_regl(port
, ureg
->sirfsoc_line_ctrl
, config_reg
);
972 len_val
= (data_bit_len
- 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET
;
973 len_val
|= (data_bit_len
+ 1 + stop_bit_len
- 1) <<
974 SIRFSOC_USP_TX_FRAME_LEN_OFFSET
;
975 len_val
|= ((data_bit_len
- 1) <<
976 SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET
);
977 len_val
|= (((clk_div_reg
& 0xc00) >> 10) <<
978 SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET
);
979 wr_regl(port
, ureg
->sirfsoc_tx_frame_ctrl
, len_val
);
981 len_val
= (data_bit_len
- 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET
;
982 len_val
|= (data_bit_len
+ 1 + stop_bit_len
- 1) <<
983 SIRFSOC_USP_RX_FRAME_LEN_OFFSET
;
984 len_val
|= (data_bit_len
- 1) <<
985 SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET
;
986 len_val
|= (((clk_div_reg
& 0xf000) >> 12) <<
987 SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET
);
988 wr_regl(port
, ureg
->sirfsoc_rx_frame_ctrl
, len_val
);
990 wr_regl(port
, ureg
->sirfsoc_async_param_reg
,
991 (SIRFUART_RECV_TIMEOUT(port
, rx_time_out
)) |
992 (sample_div_reg
& SIRFSOC_USP_ASYNC_DIV2_MASK
) <<
993 SIRFSOC_USP_ASYNC_DIV2_OFFSET
);
995 if (IS_DMA_CHAN_VALID(sirfport
->tx_dma_no
))
996 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
, SIRFUART_DMA_MODE
);
998 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
, SIRFUART_IO_MODE
);
999 if (IS_DMA_CHAN_VALID(sirfport
->rx_dma_no
))
1000 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
, SIRFUART_DMA_MODE
);
1002 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
, SIRFUART_IO_MODE
);
1003 /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
1004 if (set_baud
< 1000000)
1008 wr_regl(port
, ureg
->sirfsoc_tx_fifo_ctrl
,
1009 SIRFUART_FIFO_THD(port
) / threshold_div
);
1010 wr_regl(port
, ureg
->sirfsoc_rx_fifo_ctrl
,
1011 SIRFUART_FIFO_THD(port
) / threshold_div
);
1012 txfifo_op_reg
|= SIRFUART_FIFO_START
;
1013 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, txfifo_op_reg
);
1014 uart_update_timeout(port
, termios
->c_cflag
, set_baud
);
1015 sirfsoc_uart_start_rx(port
);
1016 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, SIRFUART_TX_EN
| SIRFUART_RX_EN
);
1017 spin_unlock_irqrestore(&port
->lock
, flags
);
1020 static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port
*port
)
1022 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1023 dma_cap_mask_t dma_mask
;
1024 struct dma_slave_config tx_slv_cfg
= {
1028 dma_cap_zero(dma_mask
);
1029 dma_cap_set(DMA_SLAVE
, dma_mask
);
1030 sirfport
->tx_dma_chan
= dma_request_channel(dma_mask
,
1031 (dma_filter_fn
)sirfsoc_dma_filter_id
,
1032 (void *)sirfport
->tx_dma_no
);
1033 if (!sirfport
->tx_dma_chan
) {
1034 dev_err(port
->dev
, "Uart Request Dma Channel Fail %d\n",
1035 sirfport
->tx_dma_no
);
1036 return -EPROBE_DEFER
;
1038 dmaengine_slave_config(sirfport
->tx_dma_chan
, &tx_slv_cfg
);
1043 static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port
*port
)
1045 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1046 dma_cap_mask_t dma_mask
;
1049 struct dma_slave_config slv_cfg
= {
1053 dma_cap_zero(dma_mask
);
1054 dma_cap_set(DMA_SLAVE
, dma_mask
);
1055 sirfport
->rx_dma_chan
= dma_request_channel(dma_mask
,
1056 (dma_filter_fn
)sirfsoc_dma_filter_id
,
1057 (void *)sirfport
->rx_dma_no
);
1058 if (!sirfport
->rx_dma_chan
) {
1059 dev_err(port
->dev
, "Uart Request Dma Channel Fail %d\n",
1060 sirfport
->rx_dma_no
);
1061 ret
= -EPROBE_DEFER
;
1064 for (i
= 0; i
< SIRFSOC_RX_LOOP_BUF_CNT
; i
++) {
1065 sirfport
->rx_dma_items
[i
].xmit
.buf
=
1066 dma_alloc_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1067 &sirfport
->rx_dma_items
[i
].dma_addr
, GFP_KERNEL
);
1068 if (!sirfport
->rx_dma_items
[i
].xmit
.buf
) {
1069 dev_err(port
->dev
, "Uart alloc bufa failed\n");
1071 goto alloc_coherent_err
;
1073 sirfport
->rx_dma_items
[i
].xmit
.head
=
1074 sirfport
->rx_dma_items
[i
].xmit
.tail
= 0;
1076 dmaengine_slave_config(sirfport
->rx_dma_chan
, &slv_cfg
);
1080 for (j
= 0; j
< i
; j
++)
1081 dma_free_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1082 sirfport
->rx_dma_items
[j
].xmit
.buf
,
1083 sirfport
->rx_dma_items
[j
].dma_addr
);
1084 dma_release_channel(sirfport
->rx_dma_chan
);
/*
 * Tear down the TX DMA channel: abort any in-flight descriptors, then
 * hand the channel back to the dmaengine core.  Only called from the
 * shutdown path when IS_DMA_CHAN_VALID(tx_dma_no) held at startup.
 */
static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
{
	/* Terminate before release: a busy channel must not be freed. */
	dmaengine_terminate_all(sirfport->tx_dma_chan);
	dma_release_channel(sirfport->tx_dma_chan);
}
/*
 * Tear down the RX DMA channel and free every loop buffer that
 * sirfsoc_uart_init_rx_dma() allocated.
 */
static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
{
	int i;
	struct uart_port *port = &sirfport->port;

	/* Stop DMA activity before freeing the buffers it may touch. */
	dmaengine_terminate_all(sirfport->rx_dma_chan);
	dma_release_channel(sirfport->rx_dma_chan);
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
				sirfport->rx_dma_items[i].xmit.buf,
				sirfport->rx_dma_items[i].dma_addr);
}
1107 static int sirfsoc_uart_startup(struct uart_port
*port
)
1109 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1110 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
1111 unsigned int index
= port
->line
;
1113 set_irq_flags(port
->irq
, IRQF_VALID
| IRQF_NOAUTOEN
);
1114 ret
= request_irq(port
->irq
,
1120 dev_err(port
->dev
, "UART%d request IRQ line (%d) failed.\n",
1125 /* initial hardware settings */
1126 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
1127 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
) |
1129 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
1130 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) |
1132 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_len
, 0);
1133 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_len
, 0);
1134 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, SIRFUART_RX_EN
| SIRFUART_TX_EN
);
1135 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
)
1136 wr_regl(port
, ureg
->sirfsoc_mode1
,
1137 SIRFSOC_USP_ENDIAN_CTRL_LSBF
|
1139 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_RESET
);
1140 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, 0);
1141 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
1142 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
1143 wr_regl(port
, ureg
->sirfsoc_tx_fifo_ctrl
, SIRFUART_FIFO_THD(port
));
1144 wr_regl(port
, ureg
->sirfsoc_rx_fifo_ctrl
, SIRFUART_FIFO_THD(port
));
1146 if (IS_DMA_CHAN_VALID(sirfport
->rx_dma_no
)) {
1147 ret
= sirfsoc_uart_init_rx_dma(port
);
1150 wr_regl(port
, ureg
->sirfsoc_rx_fifo_level_chk
,
1151 SIRFUART_RX_FIFO_CHK_SC(port
->line
, 0x4) |
1152 SIRFUART_RX_FIFO_CHK_LC(port
->line
, 0xe) |
1153 SIRFUART_RX_FIFO_CHK_HC(port
->line
, 0x1b));
1155 if (IS_DMA_CHAN_VALID(sirfport
->tx_dma_no
)) {
1156 sirfsoc_uart_init_tx_dma(port
);
1157 sirfport
->tx_dma_state
= TX_DMA_IDLE
;
1158 wr_regl(port
, ureg
->sirfsoc_tx_fifo_level_chk
,
1159 SIRFUART_TX_FIFO_CHK_SC(port
->line
, 0x1b) |
1160 SIRFUART_TX_FIFO_CHK_LC(port
->line
, 0xe) |
1161 SIRFUART_TX_FIFO_CHK_HC(port
->line
, 0x4));
1163 sirfport
->ms_enabled
= false;
1164 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
&&
1165 sirfport
->hw_flow_ctrl
) {
1166 set_irq_flags(gpio_to_irq(sirfport
->cts_gpio
),
1167 IRQF_VALID
| IRQF_NOAUTOEN
);
1168 ret
= request_irq(gpio_to_irq(sirfport
->cts_gpio
),
1169 sirfsoc_uart_usp_cts_handler
, IRQF_TRIGGER_FALLING
|
1170 IRQF_TRIGGER_RISING
, "usp_cts_irq", sirfport
);
1172 dev_err(port
->dev
, "UART-USP:request gpio irq fail\n");
1177 enable_irq(port
->irq
);
1181 free_irq(port
->irq
, sirfport
);
/*
 * uart_ops.shutdown: quiesce the port when the last user closes it.
 * Masks interrupts, releases the IRQ and flow-control resources, and
 * tears down any DMA channels acquired at startup.
 */
static void sirfsoc_uart_shutdown(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;

	/* Marco silicon clears interrupts via a dedicated clear register
	 * instead of writing 0 to the enable register. */
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
	else
		wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);

	free_irq(port->irq, sirfport);
	if (sirfport->ms_enabled)
		sirfsoc_uart_disable_ms(port);
	/* USP ports with hardware flow control also hold a CTS gpio IRQ. */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
			sirfport->hw_flow_ctrl) {
		/* presumably deasserts RTS (drive high) — confirm polarity */
		gpio_set_value(sirfport->rts_gpio, 1);
		free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
	}
	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
		sirfsoc_uart_uninit_rx_dma(sirfport);
	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
		sirfsoc_uart_uninit_tx_dma(sirfport);
		sirfport->tx_dma_state = TX_DMA_IDLE;
	}
}
1211 static const char *sirfsoc_uart_type(struct uart_port
*port
)
1213 return port
->type
== SIRFSOC_PORT_TYPE
? SIRFUART_PORT_NAME
: NULL
;
/*
 * uart_ops.request_port: claim the port's MMIO region.
 * Returns 0 on success, -EBUSY if the region is already taken.
 */
static int sirfsoc_uart_request_port(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
	void *ret;	/* non-NULL iff the region was granted */
	ret = request_mem_region(port->mapbase,
		SIRFUART_MAP_SIZE, uart_param->port_name);
	return ret ? 0 : -EBUSY;
}
/* uart_ops.release_port: give back the MMIO region claimed in
 * sirfsoc_uart_request_port(). */
static void sirfsoc_uart_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
}
1231 static void sirfsoc_uart_config_port(struct uart_port
*port
, int flags
)
1233 if (flags
& UART_CONFIG_TYPE
) {
1234 port
->type
= SIRFSOC_PORT_TYPE
;
1235 sirfsoc_uart_request_port(port
);
/* uart_ops vtable wiring this driver into the serial core. */
static struct uart_ops sirfsoc_uart_ops = {
	.tx_empty	= sirfsoc_uart_tx_empty,
	.get_mctrl	= sirfsoc_uart_get_mctrl,
	.set_mctrl	= sirfsoc_uart_set_mctrl,
	.stop_tx	= sirfsoc_uart_stop_tx,
	.start_tx	= sirfsoc_uart_start_tx,
	.stop_rx	= sirfsoc_uart_stop_rx,
	.enable_ms	= sirfsoc_uart_enable_ms,
	.break_ctl	= sirfsoc_uart_break_ctl,
	.startup	= sirfsoc_uart_startup,
	.shutdown	= sirfsoc_uart_shutdown,
	.set_termios	= sirfsoc_uart_set_termios,
	.type		= sirfsoc_uart_type,
	.release_port	= sirfsoc_uart_release_port,
	.request_port	= sirfsoc_uart_request_port,
	.config_port	= sirfsoc_uart_config_port,
};
1257 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
/*
 * Console setup callback: parse "console=...,<baud><parity><bits>"
 * options (defaulting to 115200n8) and program the port.  Console I/O
 * always uses PIO, so both DMA channel numbers are marked invalid.
 */
static int __init
sirfsoc_uart_console_setup(struct console *co, char *options)
{
	unsigned int baud = 115200;	/* defaults when no options given */
	unsigned int bits = 8;
	unsigned int parity = 'n';
	unsigned int flow = 'n';
	struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
		return -EINVAL;

	/* NOTE(review): a guard here was lost in extraction — mainline
	 * rejects an unmapped port; confirm against the original tree. */
	if (!port->mapbase)
		return -ENODEV;

	/* enable usp in mode1 register */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
		wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
				SIRFSOC_USP_ENDIAN_CTRL_LSBF);
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	port->cons = co;

	/* default console tx/rx transfer using io mode */
	sirfport->rx_dma_no = UNVALID_DMA_CHAN;
	sirfport->tx_dma_no = UNVALID_DMA_CHAN;
	return uart_set_options(port, co, baud, parity, bits, flow);
}
/*
 * Blocking single-character console output: spin until the TX FIFO
 * has room, then write the byte.
 */
static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	while (rd_regl(port,
		ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line))
		cpu_relax();	/* loop body lost in extraction — TODO confirm */
	wr_regb(port, ureg->sirfsoc_tx_fifo_data, ch);
}
/*
 * Console write: funnel the buffer through the generic helper one
 * character at a time via sirfsoc_uart_console_putchar().
 */
static void sirfsoc_uart_console_write(struct console *co, const char *s,
							unsigned int count)
{
	struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
	uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
}
/* Boot console bound to this driver's ports. */
static struct console sirfsoc_uart_console = {
	.name		= SIRFSOC_UART_NAME,
	.device		= uart_console_device,
	.flags		= CON_PRINTBUFFER,
	/* NOTE(review): .index line lost in extraction; -1 (pick port
	 * from the command line) matches mainline — TODO confirm. */
	.index		= -1,
	.write		= sirfsoc_uart_console_write,
	.setup		= sirfsoc_uart_console_setup,
	.data		= &sirfsoc_uart_drv,
};
/* Register the boot console early via console_initcall(). */
static int __init sirfsoc_uart_console_init(void)
{
	register_console(&sirfsoc_uart_console);
	return 0;
}
console_initcall(sirfsoc_uart_console_init);
#endif /* CONFIG_SERIAL_SIRFSOC_CONSOLE */
/*
 * Driver-wide uart_driver descriptor registered with the serial core.
 * The console pointer is only wired up when console support is built.
 */
static struct uart_driver sirfsoc_uart_drv = {
	.owner		= THIS_MODULE,
	.driver_name	= SIRFUART_PORT_NAME,
	.nr		= SIRFSOC_UART_NR,
	.dev_name	= SIRFSOC_UART_NAME,
	.major		= SIRFSOC_UART_MAJOR,
	.minor		= SIRFSOC_UART_MINOR,
#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
	.cons		= &sirfsoc_uart_console,
#else
	/* NOTE(review): tail lost in extraction — TODO confirm. */
	.cons		= NULL,
#endif
};
/* OF match table: prima2/marco real UARTs and the USP-based UART. */
static struct of_device_id sirfsoc_uart_ids[] = {
	{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
	{ .compatible = "sirf,marco-uart", .data = &sirfsoc_uart},
	{ .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
1346 static int sirfsoc_uart_probe(struct platform_device
*pdev
)
1348 struct sirfsoc_uart_port
*sirfport
;
1349 struct uart_port
*port
;
1350 struct resource
*res
;
1352 const struct of_device_id
*match
;
1354 match
= of_match_node(sirfsoc_uart_ids
, pdev
->dev
.of_node
);
1355 if (of_property_read_u32(pdev
->dev
.of_node
, "cell-index", &pdev
->id
)) {
1357 "Unable to find cell-index in uart node.\n");
1361 if (of_device_is_compatible(pdev
->dev
.of_node
, "sirf,prima2-usp-uart"))
1362 pdev
->id
+= ((struct sirfsoc_uart_register
*)
1363 match
->data
)->uart_param
.register_uart_nr
;
1364 sirfport
= &sirfsoc_uart_ports
[pdev
->id
];
1365 port
= &sirfport
->port
;
1366 port
->dev
= &pdev
->dev
;
1367 port
->private_data
= sirfport
;
1368 sirfport
->uart_reg
= (struct sirfsoc_uart_register
*)match
->data
;
1370 sirfport
->hw_flow_ctrl
= of_property_read_bool(pdev
->dev
.of_node
,
1371 "sirf,uart-has-rtscts");
1372 if (of_device_is_compatible(pdev
->dev
.of_node
, "sirf,prima2-uart")) {
1373 sirfport
->uart_reg
->uart_type
= SIRF_REAL_UART
;
1374 if (of_property_read_u32(pdev
->dev
.of_node
,
1375 "sirf,uart-dma-rx-channel",
1376 &sirfport
->rx_dma_no
))
1377 sirfport
->rx_dma_no
= UNVALID_DMA_CHAN
;
1378 if (of_property_read_u32(pdev
->dev
.of_node
,
1379 "sirf,uart-dma-tx-channel",
1380 &sirfport
->tx_dma_no
))
1381 sirfport
->tx_dma_no
= UNVALID_DMA_CHAN
;
1383 if (of_device_is_compatible(pdev
->dev
.of_node
, "sirf,prima2-usp-uart")) {
1384 sirfport
->uart_reg
->uart_type
= SIRF_USP_UART
;
1385 if (of_property_read_u32(pdev
->dev
.of_node
,
1386 "sirf,usp-dma-rx-channel",
1387 &sirfport
->rx_dma_no
))
1388 sirfport
->rx_dma_no
= UNVALID_DMA_CHAN
;
1389 if (of_property_read_u32(pdev
->dev
.of_node
,
1390 "sirf,usp-dma-tx-channel",
1391 &sirfport
->tx_dma_no
))
1392 sirfport
->tx_dma_no
= UNVALID_DMA_CHAN
;
1393 if (!sirfport
->hw_flow_ctrl
)
1394 goto usp_no_flow_control
;
1395 if (of_find_property(pdev
->dev
.of_node
, "cts-gpios", NULL
))
1396 sirfport
->cts_gpio
= of_get_named_gpio(
1397 pdev
->dev
.of_node
, "cts-gpios", 0);
1399 sirfport
->cts_gpio
= -1;
1400 if (of_find_property(pdev
->dev
.of_node
, "rts-gpios", NULL
))
1401 sirfport
->rts_gpio
= of_get_named_gpio(
1402 pdev
->dev
.of_node
, "rts-gpios", 0);
1404 sirfport
->rts_gpio
= -1;
1406 if ((!gpio_is_valid(sirfport
->cts_gpio
) ||
1407 !gpio_is_valid(sirfport
->rts_gpio
))) {
1410 "Usp flow control must have cts and rts gpio");
1413 ret
= devm_gpio_request(&pdev
->dev
, sirfport
->cts_gpio
,
1416 dev_err(&pdev
->dev
, "Unable request cts gpio");
1419 gpio_direction_input(sirfport
->cts_gpio
);
1420 ret
= devm_gpio_request(&pdev
->dev
, sirfport
->rts_gpio
,
1423 dev_err(&pdev
->dev
, "Unable request rts gpio");
1426 gpio_direction_output(sirfport
->rts_gpio
, 1);
1428 usp_no_flow_control
:
1429 if (of_device_is_compatible(pdev
->dev
.of_node
, "sirf,marco-uart"))
1430 sirfport
->is_marco
= true;
1432 if (of_property_read_u32(pdev
->dev
.of_node
,
1436 "Unable to find fifosize in uart node.\n");
1441 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1443 dev_err(&pdev
->dev
, "Insufficient resources.\n");
1447 tasklet_init(&sirfport
->rx_dma_complete_tasklet
,
1448 sirfsoc_uart_rx_dma_complete_tl
, (unsigned long)sirfport
);
1449 tasklet_init(&sirfport
->rx_tmo_process_tasklet
,
1450 sirfsoc_rx_tmo_process_tl
, (unsigned long)sirfport
);
1451 port
->mapbase
= res
->start
;
1452 port
->membase
= devm_ioremap(&pdev
->dev
, res
->start
, resource_size(res
));
1453 if (!port
->membase
) {
1454 dev_err(&pdev
->dev
, "Cannot remap resource.\n");
1458 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
1460 dev_err(&pdev
->dev
, "Insufficient resources.\n");
1464 port
->irq
= res
->start
;
1466 sirfport
->clk
= clk_get(&pdev
->dev
, NULL
);
1467 if (IS_ERR(sirfport
->clk
)) {
1468 ret
= PTR_ERR(sirfport
->clk
);
1471 clk_prepare_enable(sirfport
->clk
);
1472 port
->uartclk
= clk_get_rate(sirfport
->clk
);
1474 port
->ops
= &sirfsoc_uart_ops
;
1475 spin_lock_init(&port
->lock
);
1477 platform_set_drvdata(pdev
, sirfport
);
1478 ret
= uart_add_one_port(&sirfsoc_uart_drv
, port
);
1480 dev_err(&pdev
->dev
, "Cannot add UART port(%d).\n", pdev
->id
);
1487 clk_disable_unprepare(sirfport
->clk
);
1488 clk_put(sirfport
->clk
);
1493 static int sirfsoc_uart_remove(struct platform_device
*pdev
)
1495 struct sirfsoc_uart_port
*sirfport
= platform_get_drvdata(pdev
);
1496 struct uart_port
*port
= &sirfport
->port
;
1497 clk_disable_unprepare(sirfport
->clk
);
1498 clk_put(sirfport
->clk
);
1499 uart_remove_one_port(&sirfsoc_uart_drv
, port
);
/* Legacy platform PM suspend hook: delegate port quiescing to the
 * serial core. */
static int
sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_suspend_port(&sirfsoc_uart_drv, port);
	return 0;
}
/* Legacy platform PM resume hook: delegate port re-init to the
 * serial core. */
static int sirfsoc_uart_resume(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_resume_port(&sirfsoc_uart_drv, port);
	return 0;
}
/* Platform-bus glue; devices are matched via sirfsoc_uart_ids (OF). */
static struct platform_driver sirfsoc_uart_driver = {
	.probe		= sirfsoc_uart_probe,
	.remove		= sirfsoc_uart_remove,
	.suspend	= sirfsoc_uart_suspend,
	.resume		= sirfsoc_uart_resume,
	.driver		= {
		.name	= SIRFUART_PORT_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= sirfsoc_uart_ids,
	},
};
/*
 * Module init: register the uart_driver core object first, then the
 * platform driver; unwind the first registration if the second fails.
 * (Error-path control flow reconstructed — source extraction lost the
 * intermediate lines; confirm against the original tree.)
 */
static int __init sirfsoc_uart_init(void)
{
	int ret = 0;

	ret = uart_register_driver(&sirfsoc_uart_drv);
	if (ret)
		goto out;

	ret = platform_driver_register(&sirfsoc_uart_driver);
	if (ret)
		uart_unregister_driver(&sirfsoc_uart_drv);
out:
	return ret;
}
module_init(sirfsoc_uart_init);
/* Module exit: unregister in reverse order of sirfsoc_uart_init(). */
static void __exit sirfsoc_uart_exit(void)
{
	platform_driver_unregister(&sirfsoc_uart_driver);
	uart_unregister_driver(&sirfsoc_uart_drv);
}
module_exit(sirfsoc_uart_exit);
1555 MODULE_LICENSE("GPL v2");
1556 MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
1557 MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");