/*
 * Driver for CSR SiRFprimaII onboard UARTs.
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
9 #include <linux/module.h>
10 #include <linux/ioport.h>
11 #include <linux/platform_device.h>
12 #include <linux/init.h>
13 #include <linux/sysrq.h>
14 #include <linux/console.h>
15 #include <linux/tty.h>
16 #include <linux/tty_flip.h>
17 #include <linux/serial_core.h>
18 #include <linux/serial.h>
19 #include <linux/clk.h>
21 #include <linux/slab.h>
23 #include <linux/of_gpio.h>
24 #include <linux/dmaengine.h>
25 #include <linux/dma-direction.h>
26 #include <linux/dma-mapping.h>
28 #include <asm/mach/irq.h>
30 #include "sirfsoc_uart.h"
33 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port
*sirfport
, int count
);
35 sirfsoc_uart_pio_rx_chars(struct uart_port
*port
, unsigned int max_rx_count
);
36 static struct uart_driver sirfsoc_uart_drv
;
38 static void sirfsoc_uart_tx_dma_complete_callback(void *param
);
39 static const struct sirfsoc_baudrate_to_regv baudrate_to_regv
[] = {
60 static struct sirfsoc_uart_port
*sirf_ports
[SIRFSOC_UART_NR
];
62 static inline struct sirfsoc_uart_port
*to_sirfport(struct uart_port
*port
)
64 return container_of(port
, struct sirfsoc_uart_port
, port
);
67 static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port
*port
)
70 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
71 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
72 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
73 reg
= rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
);
74 return (reg
& ufifo_st
->ff_empty(port
)) ? TIOCSER_TEMT
: 0;
77 static unsigned int sirfsoc_uart_get_mctrl(struct uart_port
*port
)
79 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
80 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
81 if (!sirfport
->hw_flow_ctrl
|| !sirfport
->ms_enabled
)
83 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
84 if (!(rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) &
85 SIRFUART_AFC_CTS_STATUS
))
90 if (!gpio_get_value(sirfport
->cts_gpio
))
96 return TIOCM_CAR
| TIOCM_DSR
;
98 return TIOCM_CAR
| TIOCM_DSR
| TIOCM_CTS
;
101 static void sirfsoc_uart_set_mctrl(struct uart_port
*port
, unsigned int mctrl
)
103 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
104 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
105 unsigned int assert = mctrl
& TIOCM_RTS
;
106 unsigned int val
= assert ? SIRFUART_AFC_CTRL_RX_THD
: 0x0;
107 unsigned int current_val
;
109 if (mctrl
& TIOCM_LOOP
) {
110 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
)
111 wr_regl(port
, ureg
->sirfsoc_line_ctrl
,
112 rd_regl(port
, ureg
->sirfsoc_line_ctrl
) |
115 wr_regl(port
, ureg
->sirfsoc_mode1
,
116 rd_regl(port
, ureg
->sirfsoc_mode1
) |
117 SIRFSOC_USP_LOOP_BACK_CTRL
);
119 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
)
120 wr_regl(port
, ureg
->sirfsoc_line_ctrl
,
121 rd_regl(port
, ureg
->sirfsoc_line_ctrl
) &
122 ~SIRFUART_LOOP_BACK
);
124 wr_regl(port
, ureg
->sirfsoc_mode1
,
125 rd_regl(port
, ureg
->sirfsoc_mode1
) &
126 ~SIRFSOC_USP_LOOP_BACK_CTRL
);
129 if (!sirfport
->hw_flow_ctrl
|| !sirfport
->ms_enabled
)
131 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
132 current_val
= rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) & ~0xFF;
134 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
, val
);
137 gpio_set_value(sirfport
->rts_gpio
, 1);
139 gpio_set_value(sirfport
->rts_gpio
, 0);
143 static void sirfsoc_uart_stop_tx(struct uart_port
*port
)
145 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
146 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
147 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
149 if (sirfport
->tx_dma_chan
) {
150 if (sirfport
->tx_dma_state
== TX_DMA_RUNNING
) {
151 dmaengine_pause(sirfport
->tx_dma_chan
);
152 sirfport
->tx_dma_state
= TX_DMA_PAUSE
;
154 if (!sirfport
->is_atlas7
)
155 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
156 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
157 ~uint_en
->sirfsoc_txfifo_empty_en
);
159 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
160 uint_en
->sirfsoc_txfifo_empty_en
);
163 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
)
164 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, rd_regl(port
,
165 ureg
->sirfsoc_tx_rx_en
) & ~SIRFUART_TX_EN
);
166 if (!sirfport
->is_atlas7
)
167 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
168 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
169 ~uint_en
->sirfsoc_txfifo_empty_en
);
171 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
172 uint_en
->sirfsoc_txfifo_empty_en
);
176 static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port
*sirfport
)
178 struct uart_port
*port
= &sirfport
->port
;
179 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
180 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
181 struct circ_buf
*xmit
= &port
->state
->xmit
;
182 unsigned long tran_size
;
183 unsigned long tran_start
;
184 unsigned long pio_tx_size
;
186 tran_size
= CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
, UART_XMIT_SIZE
);
187 tran_start
= (unsigned long)(xmit
->buf
+ xmit
->tail
);
188 if (uart_circ_empty(xmit
) || uart_tx_stopped(port
) ||
191 if (sirfport
->tx_dma_state
== TX_DMA_PAUSE
) {
192 dmaengine_resume(sirfport
->tx_dma_chan
);
195 if (sirfport
->tx_dma_state
== TX_DMA_RUNNING
)
197 if (!sirfport
->is_atlas7
)
198 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
199 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
200 ~(uint_en
->sirfsoc_txfifo_empty_en
));
202 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
203 uint_en
->sirfsoc_txfifo_empty_en
);
205 * DMA requires buffer address and buffer length are both aligned with
206 * 4 bytes, so we use PIO for
207 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
208 * bytes, and move to DMA for the left part aligned with 4bytes
209 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
210 * part first, move to PIO for the left 1~3 bytes
212 if (tran_size
< 4 || BYTES_TO_ALIGN(tran_start
)) {
213 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
214 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
215 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
)|
217 if (BYTES_TO_ALIGN(tran_start
)) {
218 pio_tx_size
= sirfsoc_uart_pio_tx_chars(sirfport
,
219 BYTES_TO_ALIGN(tran_start
));
220 tran_size
-= pio_tx_size
;
223 sirfsoc_uart_pio_tx_chars(sirfport
, tran_size
);
224 if (!sirfport
->is_atlas7
)
225 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
226 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)|
227 uint_en
->sirfsoc_txfifo_empty_en
);
229 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
230 uint_en
->sirfsoc_txfifo_empty_en
);
231 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
233 /* tx transfer mode switch into dma mode */
234 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
235 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
236 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
)&
238 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
241 sirfport
->tx_dma_addr
= dma_map_single(port
->dev
,
242 xmit
->buf
+ xmit
->tail
,
243 tran_size
, DMA_TO_DEVICE
);
244 sirfport
->tx_dma_desc
= dmaengine_prep_slave_single(
245 sirfport
->tx_dma_chan
, sirfport
->tx_dma_addr
,
246 tran_size
, DMA_MEM_TO_DEV
, DMA_PREP_INTERRUPT
);
247 if (!sirfport
->tx_dma_desc
) {
248 dev_err(port
->dev
, "DMA prep slave single fail\n");
251 sirfport
->tx_dma_desc
->callback
=
252 sirfsoc_uart_tx_dma_complete_callback
;
253 sirfport
->tx_dma_desc
->callback_param
= (void *)sirfport
;
254 sirfport
->transfer_size
= tran_size
;
256 dmaengine_submit(sirfport
->tx_dma_desc
);
257 dma_async_issue_pending(sirfport
->tx_dma_chan
);
258 sirfport
->tx_dma_state
= TX_DMA_RUNNING
;
262 static void sirfsoc_uart_start_tx(struct uart_port
*port
)
264 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
265 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
266 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
267 if (sirfport
->tx_dma_chan
)
268 sirfsoc_uart_tx_with_dma(sirfport
);
270 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
)
271 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, rd_regl(port
,
272 ureg
->sirfsoc_tx_rx_en
) | SIRFUART_TX_EN
);
273 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
274 sirfsoc_uart_pio_tx_chars(sirfport
, port
->fifosize
);
275 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
276 if (!sirfport
->is_atlas7
)
277 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
278 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)|
279 uint_en
->sirfsoc_txfifo_empty_en
);
281 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
282 uint_en
->sirfsoc_txfifo_empty_en
);
286 static void sirfsoc_uart_stop_rx(struct uart_port
*port
)
288 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
289 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
290 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
292 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
293 if (sirfport
->rx_dma_chan
) {
294 if (!sirfport
->is_atlas7
)
295 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
296 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
297 ~(SIRFUART_RX_DMA_INT_EN(uint_en
,
298 sirfport
->uart_reg
->uart_type
) |
299 uint_en
->sirfsoc_rx_done_en
));
301 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
302 SIRFUART_RX_DMA_INT_EN(uint_en
,
303 sirfport
->uart_reg
->uart_type
)|
304 uint_en
->sirfsoc_rx_done_en
);
305 dmaengine_terminate_all(sirfport
->rx_dma_chan
);
307 if (!sirfport
->is_atlas7
)
308 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
309 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
310 ~(SIRFUART_RX_IO_INT_EN(uint_en
,
311 sirfport
->uart_reg
->uart_type
)));
313 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
314 SIRFUART_RX_IO_INT_EN(uint_en
,
315 sirfport
->uart_reg
->uart_type
));
319 static void sirfsoc_uart_disable_ms(struct uart_port
*port
)
321 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
322 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
323 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
325 if (!sirfport
->hw_flow_ctrl
)
327 sirfport
->ms_enabled
= false;
328 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
329 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
,
330 rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) & ~0x3FF);
331 if (!sirfport
->is_atlas7
)
332 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
333 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
334 ~uint_en
->sirfsoc_cts_en
);
336 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
337 uint_en
->sirfsoc_cts_en
);
339 disable_irq(gpio_to_irq(sirfport
->cts_gpio
));
342 static irqreturn_t
sirfsoc_uart_usp_cts_handler(int irq
, void *dev_id
)
344 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)dev_id
;
345 struct uart_port
*port
= &sirfport
->port
;
346 spin_lock(&port
->lock
);
347 if (gpio_is_valid(sirfport
->cts_gpio
) && sirfport
->ms_enabled
)
348 uart_handle_cts_change(port
,
349 !gpio_get_value(sirfport
->cts_gpio
));
350 spin_unlock(&port
->lock
);
354 static void sirfsoc_uart_enable_ms(struct uart_port
*port
)
356 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
357 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
358 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
360 if (!sirfport
->hw_flow_ctrl
)
362 sirfport
->ms_enabled
= true;
363 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
364 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
,
365 rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) |
366 SIRFUART_AFC_TX_EN
| SIRFUART_AFC_RX_EN
|
367 SIRFUART_AFC_CTRL_RX_THD
);
368 if (!sirfport
->is_atlas7
)
369 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
370 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
371 | uint_en
->sirfsoc_cts_en
);
373 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
374 uint_en
->sirfsoc_cts_en
);
376 enable_irq(gpio_to_irq(sirfport
->cts_gpio
));
379 static void sirfsoc_uart_break_ctl(struct uart_port
*port
, int break_state
)
381 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
382 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
383 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
384 unsigned long ulcon
= rd_regl(port
, ureg
->sirfsoc_line_ctrl
);
386 ulcon
|= SIRFUART_SET_BREAK
;
388 ulcon
&= ~SIRFUART_SET_BREAK
;
389 wr_regl(port
, ureg
->sirfsoc_line_ctrl
, ulcon
);
394 sirfsoc_uart_pio_rx_chars(struct uart_port
*port
, unsigned int max_rx_count
)
396 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
397 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
398 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
399 unsigned int ch
, rx_count
= 0;
400 struct tty_struct
*tty
;
401 tty
= tty_port_tty_get(&port
->state
->port
);
404 while (!(rd_regl(port
, ureg
->sirfsoc_rx_fifo_status
) &
405 ufifo_st
->ff_empty(port
))) {
406 ch
= rd_regl(port
, ureg
->sirfsoc_rx_fifo_data
) |
408 if (unlikely(uart_handle_sysrq_char(port
, ch
)))
410 uart_insert_char(port
, 0, 0, ch
, TTY_NORMAL
);
412 if (rx_count
>= max_rx_count
)
416 sirfport
->rx_io_count
+= rx_count
;
417 port
->icount
.rx
+= rx_count
;
423 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port
*sirfport
, int count
)
425 struct uart_port
*port
= &sirfport
->port
;
426 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
427 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
428 struct circ_buf
*xmit
= &port
->state
->xmit
;
429 unsigned int num_tx
= 0;
430 while (!uart_circ_empty(xmit
) &&
431 !(rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
432 ufifo_st
->ff_full(port
)) &&
434 wr_regl(port
, ureg
->sirfsoc_tx_fifo_data
,
435 xmit
->buf
[xmit
->tail
]);
436 xmit
->tail
= (xmit
->tail
+ 1) & (UART_XMIT_SIZE
- 1);
440 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
441 uart_write_wakeup(port
);
445 static void sirfsoc_uart_tx_dma_complete_callback(void *param
)
447 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
448 struct uart_port
*port
= &sirfport
->port
;
449 struct circ_buf
*xmit
= &port
->state
->xmit
;
452 spin_lock_irqsave(&port
->lock
, flags
);
453 xmit
->tail
= (xmit
->tail
+ sirfport
->transfer_size
) &
454 (UART_XMIT_SIZE
- 1);
455 port
->icount
.tx
+= sirfport
->transfer_size
;
456 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
457 uart_write_wakeup(port
);
458 if (sirfport
->tx_dma_addr
)
459 dma_unmap_single(port
->dev
, sirfport
->tx_dma_addr
,
460 sirfport
->transfer_size
, DMA_TO_DEVICE
);
461 sirfport
->tx_dma_state
= TX_DMA_IDLE
;
462 sirfsoc_uart_tx_with_dma(sirfport
);
463 spin_unlock_irqrestore(&port
->lock
, flags
);
466 static irqreturn_t
sirfsoc_uart_isr(int irq
, void *dev_id
)
468 unsigned long intr_status
;
469 unsigned long cts_status
;
470 unsigned long flag
= TTY_NORMAL
;
471 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)dev_id
;
472 struct uart_port
*port
= &sirfport
->port
;
473 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
474 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
475 struct sirfsoc_int_status
*uint_st
= &sirfport
->uart_reg
->uart_int_st
;
476 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
477 struct uart_state
*state
= port
->state
;
478 struct circ_buf
*xmit
= &port
->state
->xmit
;
479 spin_lock(&port
->lock
);
480 intr_status
= rd_regl(port
, ureg
->sirfsoc_int_st_reg
);
481 wr_regl(port
, ureg
->sirfsoc_int_st_reg
, intr_status
);
482 intr_status
&= rd_regl(port
, ureg
->sirfsoc_int_en_reg
);
483 if (unlikely(intr_status
& (SIRFUART_ERR_INT_STAT(uint_st
,
484 sirfport
->uart_reg
->uart_type
)))) {
485 if (intr_status
& uint_st
->sirfsoc_rxd_brk
) {
487 if (uart_handle_break(port
))
490 if (intr_status
& uint_st
->sirfsoc_rx_oflow
) {
491 port
->icount
.overrun
++;
494 if (intr_status
& uint_st
->sirfsoc_frm_err
) {
495 port
->icount
.frame
++;
498 if (intr_status
& uint_st
->sirfsoc_parity_err
) {
499 port
->icount
.parity
++;
502 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
503 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
504 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_START
);
505 intr_status
&= port
->read_status_mask
;
506 uart_insert_char(port
, intr_status
,
507 uint_en
->sirfsoc_rx_oflow_en
, 0, flag
);
510 if ((sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) &&
511 (intr_status
& SIRFUART_CTS_INT_ST(uint_st
)) &&
512 !sirfport
->tx_dma_state
) {
513 cts_status
= rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) &
514 SIRFUART_AFC_CTS_STATUS
;
519 uart_handle_cts_change(port
, cts_status
);
520 wake_up_interruptible(&state
->port
.delta_msr_wait
);
522 if (!sirfport
->rx_dma_chan
&&
523 (intr_status
& SIRFUART_RX_IO_INT_ST(uint_st
))) {
525 * chip will trigger continuous RX_TIMEOUT interrupt
526 * in RXFIFO empty and not trigger if RXFIFO recevice
527 * data in limit time, original method use RX_TIMEOUT
528 * will trigger lots of useless interrupt in RXFIFO
529 * empty.RXFIFO received one byte will trigger RX_DONE
530 * interrupt.use RX_DONE to wait for data received
531 * into RXFIFO, use RX_THD/RX_FULL for lots data receive
532 * and use RX_TIMEOUT for the last left data.
534 if (intr_status
& uint_st
->sirfsoc_rx_done
) {
535 if (!sirfport
->is_atlas7
) {
536 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
537 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
538 & ~(uint_en
->sirfsoc_rx_done_en
));
539 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
540 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
541 | (uint_en
->sirfsoc_rx_timeout_en
));
543 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
544 uint_en
->sirfsoc_rx_done_en
);
545 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
546 uint_en
->sirfsoc_rx_timeout_en
);
549 if (intr_status
& uint_st
->sirfsoc_rx_timeout
) {
550 if (!sirfport
->is_atlas7
) {
551 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
552 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
553 & ~(uint_en
->sirfsoc_rx_timeout_en
));
554 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
555 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
556 | (uint_en
->sirfsoc_rx_done_en
));
559 ureg
->sirfsoc_int_en_clr_reg
,
560 uint_en
->sirfsoc_rx_timeout_en
);
561 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
562 uint_en
->sirfsoc_rx_done_en
);
565 sirfsoc_uart_pio_rx_chars(port
, port
->fifosize
);
568 spin_unlock(&port
->lock
);
569 tty_flip_buffer_push(&state
->port
);
570 spin_lock(&port
->lock
);
571 if (intr_status
& uint_st
->sirfsoc_txfifo_empty
) {
572 if (sirfport
->tx_dma_chan
)
573 sirfsoc_uart_tx_with_dma(sirfport
);
575 if (uart_circ_empty(xmit
) || uart_tx_stopped(port
)) {
576 spin_unlock(&port
->lock
);
579 sirfsoc_uart_pio_tx_chars(sirfport
,
581 if ((uart_circ_empty(xmit
)) &&
582 (rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
583 ufifo_st
->ff_empty(port
)))
584 sirfsoc_uart_stop_tx(port
);
588 spin_unlock(&port
->lock
);
/*
 * DMA-engine completion callback for cyclic RX.  Intentionally empty: RX
 * data is harvested by the hrtimer polling path instead.
 * NOTE(review): body not visible in this extraction — confirm it is empty
 * in the upstream driver.
 */
static void sirfsoc_uart_rx_dma_complete_callback(void *param)
{
}
597 /* submit rx dma task into dmaengine */
598 static void sirfsoc_uart_start_next_rx_dma(struct uart_port
*port
)
600 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
601 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
602 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
603 sirfport
->rx_io_count
= 0;
604 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
605 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) &
607 sirfport
->rx_dma_items
.xmit
.tail
=
608 sirfport
->rx_dma_items
.xmit
.head
= 0;
609 sirfport
->rx_dma_items
.desc
=
610 dmaengine_prep_dma_cyclic(sirfport
->rx_dma_chan
,
611 sirfport
->rx_dma_items
.dma_addr
, SIRFSOC_RX_DMA_BUF_SIZE
,
612 SIRFSOC_RX_DMA_BUF_SIZE
/ 2,
613 DMA_DEV_TO_MEM
, DMA_PREP_INTERRUPT
);
614 if (IS_ERR_OR_NULL(sirfport
->rx_dma_items
.desc
)) {
615 dev_err(port
->dev
, "DMA slave single fail\n");
618 sirfport
->rx_dma_items
.desc
->callback
=
619 sirfsoc_uart_rx_dma_complete_callback
;
620 sirfport
->rx_dma_items
.desc
->callback_param
= sirfport
;
621 sirfport
->rx_dma_items
.cookie
=
622 dmaengine_submit(sirfport
->rx_dma_items
.desc
);
623 dma_async_issue_pending(sirfport
->rx_dma_chan
);
624 if (!sirfport
->is_atlas7
)
625 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
626 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
627 SIRFUART_RX_DMA_INT_EN(uint_en
,
628 sirfport
->uart_reg
->uart_type
));
630 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
631 SIRFUART_RX_DMA_INT_EN(uint_en
,
632 sirfport
->uart_reg
->uart_type
));
635 static void sirfsoc_uart_start_rx(struct uart_port
*port
)
637 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
638 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
639 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
641 sirfport
->rx_io_count
= 0;
642 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
643 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
644 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_START
);
645 if (sirfport
->rx_dma_chan
)
646 sirfsoc_uart_start_next_rx_dma(port
);
648 if (!sirfport
->is_atlas7
)
649 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
650 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
651 SIRFUART_RX_IO_INT_EN(uint_en
,
652 sirfport
->uart_reg
->uart_type
));
654 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
655 SIRFUART_RX_IO_INT_EN(uint_en
,
656 sirfport
->uart_reg
->uart_type
));
661 sirfsoc_usp_calc_sample_div(unsigned long set_rate
,
662 unsigned long ioclk_rate
, unsigned long *sample_reg
)
664 unsigned long min_delta
= ~0UL;
665 unsigned short sample_div
;
666 unsigned long ioclk_div
= 0;
667 unsigned long temp_delta
;
669 for (sample_div
= SIRF_USP_MIN_SAMPLE_DIV
;
670 sample_div
<= SIRF_MAX_SAMPLE_DIV
; sample_div
++) {
671 temp_delta
= ioclk_rate
-
672 (ioclk_rate
+ (set_rate
* sample_div
) / 2)
673 / (set_rate
* sample_div
) * set_rate
* sample_div
;
675 temp_delta
= (temp_delta
> 0) ? temp_delta
: -temp_delta
;
676 if (temp_delta
< min_delta
) {
677 ioclk_div
= (2 * ioclk_rate
/
678 (set_rate
* sample_div
) + 1) / 2 - 1;
679 if (ioclk_div
> SIRF_IOCLK_DIV_MAX
)
681 min_delta
= temp_delta
;
682 *sample_reg
= sample_div
;
691 sirfsoc_uart_calc_sample_div(unsigned long baud_rate
,
692 unsigned long ioclk_rate
, unsigned long *set_baud
)
694 unsigned long min_delta
= ~0UL;
695 unsigned short sample_div
;
696 unsigned int regv
= 0;
697 unsigned long ioclk_div
;
698 unsigned long baud_tmp
;
701 for (sample_div
= SIRF_MIN_SAMPLE_DIV
;
702 sample_div
<= SIRF_MAX_SAMPLE_DIV
; sample_div
++) {
703 ioclk_div
= (ioclk_rate
/ (baud_rate
* (sample_div
+ 1))) - 1;
704 if (ioclk_div
> SIRF_IOCLK_DIV_MAX
)
706 baud_tmp
= ioclk_rate
/ ((ioclk_div
+ 1) * (sample_div
+ 1));
707 temp_delta
= baud_tmp
- baud_rate
;
708 temp_delta
= (temp_delta
> 0) ? temp_delta
: -temp_delta
;
709 if (temp_delta
< min_delta
) {
710 regv
= regv
& (~SIRF_IOCLK_DIV_MASK
);
711 regv
= regv
| ioclk_div
;
712 regv
= regv
& (~SIRF_SAMPLE_DIV_MASK
);
713 regv
= regv
| (sample_div
<< SIRF_SAMPLE_DIV_SHIFT
);
714 min_delta
= temp_delta
;
715 *set_baud
= baud_tmp
;
721 static void sirfsoc_uart_set_termios(struct uart_port
*port
,
722 struct ktermios
*termios
,
723 struct ktermios
*old
)
725 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
726 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
727 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
728 unsigned long config_reg
= 0;
729 unsigned long baud_rate
;
730 unsigned long set_baud
;
733 unsigned int clk_div_reg
= 0;
734 unsigned long txfifo_op_reg
, ioclk_rate
;
735 unsigned long rx_time_out
;
737 u32 data_bit_len
, stop_bit_len
, len_val
;
738 unsigned long sample_div_reg
= 0xf;
739 ioclk_rate
= port
->uartclk
;
741 switch (termios
->c_cflag
& CSIZE
) {
745 config_reg
|= SIRFUART_DATA_BIT_LEN_8
;
749 config_reg
|= SIRFUART_DATA_BIT_LEN_7
;
753 config_reg
|= SIRFUART_DATA_BIT_LEN_6
;
757 config_reg
|= SIRFUART_DATA_BIT_LEN_5
;
760 if (termios
->c_cflag
& CSTOPB
) {
761 config_reg
|= SIRFUART_STOP_BIT_LEN_2
;
766 spin_lock_irqsave(&port
->lock
, flags
);
767 port
->read_status_mask
= uint_en
->sirfsoc_rx_oflow_en
;
768 port
->ignore_status_mask
= 0;
769 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
770 if (termios
->c_iflag
& INPCK
)
771 port
->read_status_mask
|= uint_en
->sirfsoc_frm_err_en
|
772 uint_en
->sirfsoc_parity_err_en
;
774 if (termios
->c_iflag
& INPCK
)
775 port
->read_status_mask
|= uint_en
->sirfsoc_frm_err_en
;
777 if (termios
->c_iflag
& (IGNBRK
| BRKINT
| PARMRK
))
778 port
->read_status_mask
|= uint_en
->sirfsoc_rxd_brk_en
;
779 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
780 if (termios
->c_iflag
& IGNPAR
)
781 port
->ignore_status_mask
|=
782 uint_en
->sirfsoc_frm_err_en
|
783 uint_en
->sirfsoc_parity_err_en
;
784 if (termios
->c_cflag
& PARENB
) {
785 if (termios
->c_cflag
& CMSPAR
) {
786 if (termios
->c_cflag
& PARODD
)
787 config_reg
|= SIRFUART_STICK_BIT_MARK
;
789 config_reg
|= SIRFUART_STICK_BIT_SPACE
;
791 if (termios
->c_cflag
& PARODD
)
792 config_reg
|= SIRFUART_STICK_BIT_ODD
;
794 config_reg
|= SIRFUART_STICK_BIT_EVEN
;
798 if (termios
->c_iflag
& IGNPAR
)
799 port
->ignore_status_mask
|=
800 uint_en
->sirfsoc_frm_err_en
;
801 if (termios
->c_cflag
& PARENB
)
803 "USP-UART not support parity err\n");
805 if (termios
->c_iflag
& IGNBRK
) {
806 port
->ignore_status_mask
|=
807 uint_en
->sirfsoc_rxd_brk_en
;
808 if (termios
->c_iflag
& IGNPAR
)
809 port
->ignore_status_mask
|=
810 uint_en
->sirfsoc_rx_oflow_en
;
812 if ((termios
->c_cflag
& CREAD
) == 0)
813 port
->ignore_status_mask
|= SIRFUART_DUMMY_READ
;
814 /* Hardware Flow Control Settings */
815 if (UART_ENABLE_MS(port
, termios
->c_cflag
)) {
816 if (!sirfport
->ms_enabled
)
817 sirfsoc_uart_enable_ms(port
);
819 if (sirfport
->ms_enabled
)
820 sirfsoc_uart_disable_ms(port
);
822 baud_rate
= uart_get_baud_rate(port
, termios
, old
, 0, 4000000);
823 if (ioclk_rate
== 150000000) {
824 for (ic
= 0; ic
< SIRF_BAUD_RATE_SUPPORT_NR
; ic
++)
825 if (baud_rate
== baudrate_to_regv
[ic
].baud_rate
)
826 clk_div_reg
= baudrate_to_regv
[ic
].reg_val
;
828 set_baud
= baud_rate
;
829 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
830 if (unlikely(clk_div_reg
== 0))
831 clk_div_reg
= sirfsoc_uart_calc_sample_div(baud_rate
,
832 ioclk_rate
, &set_baud
);
833 wr_regl(port
, ureg
->sirfsoc_divisor
, clk_div_reg
);
835 clk_div_reg
= sirfsoc_usp_calc_sample_div(baud_rate
,
836 ioclk_rate
, &sample_div_reg
);
838 set_baud
= ((ioclk_rate
/ (clk_div_reg
+1) - 1) /
839 (sample_div_reg
+ 1));
840 /* setting usp mode 2 */
841 len_val
= ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET
) |
842 (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET
));
843 len_val
|= ((clk_div_reg
& SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK
)
844 << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET
);
845 wr_regl(port
, ureg
->sirfsoc_mode2
, len_val
);
847 if (tty_termios_baud_rate(termios
))
848 tty_termios_encode_baud_rate(termios
, set_baud
, set_baud
);
849 /* set receive timeout && data bits len */
850 rx_time_out
= SIRFSOC_UART_RX_TIMEOUT(set_baud
, 20000);
851 rx_time_out
= SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out
);
852 txfifo_op_reg
= rd_regl(port
, ureg
->sirfsoc_tx_fifo_op
);
853 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_STOP
);
854 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
,
855 (txfifo_op_reg
& ~SIRFUART_FIFO_START
));
856 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
857 config_reg
|= SIRFUART_UART_RECV_TIMEOUT(rx_time_out
);
858 wr_regl(port
, ureg
->sirfsoc_line_ctrl
, config_reg
);
861 len_val
= (data_bit_len
- 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET
;
862 len_val
|= (data_bit_len
+ 1 + stop_bit_len
- 1) <<
863 SIRFSOC_USP_TX_FRAME_LEN_OFFSET
;
864 len_val
|= ((data_bit_len
- 1) <<
865 SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET
);
866 len_val
|= (((clk_div_reg
& 0xc00) >> 10) <<
867 SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET
);
868 wr_regl(port
, ureg
->sirfsoc_tx_frame_ctrl
, len_val
);
870 len_val
= (data_bit_len
- 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET
;
871 len_val
|= (data_bit_len
+ 1 + stop_bit_len
- 1) <<
872 SIRFSOC_USP_RX_FRAME_LEN_OFFSET
;
873 len_val
|= (data_bit_len
- 1) <<
874 SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET
;
875 len_val
|= (((clk_div_reg
& 0xf000) >> 12) <<
876 SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET
);
877 wr_regl(port
, ureg
->sirfsoc_rx_frame_ctrl
, len_val
);
879 wr_regl(port
, ureg
->sirfsoc_async_param_reg
,
880 (SIRFUART_USP_RECV_TIMEOUT(rx_time_out
)) |
881 (sample_div_reg
& SIRFSOC_USP_ASYNC_DIV2_MASK
) <<
882 SIRFSOC_USP_ASYNC_DIV2_OFFSET
);
884 if (sirfport
->tx_dma_chan
)
885 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
, SIRFUART_DMA_MODE
);
887 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
, SIRFUART_IO_MODE
);
888 if (sirfport
->rx_dma_chan
)
889 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
, SIRFUART_DMA_MODE
);
891 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
, SIRFUART_IO_MODE
);
892 sirfport
->rx_period_time
= 20000000;
893 /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
894 if (set_baud
< 1000000)
898 wr_regl(port
, ureg
->sirfsoc_tx_fifo_ctrl
,
899 SIRFUART_FIFO_THD(port
) / threshold_div
);
900 wr_regl(port
, ureg
->sirfsoc_rx_fifo_ctrl
,
901 SIRFUART_FIFO_THD(port
) / threshold_div
);
902 txfifo_op_reg
|= SIRFUART_FIFO_START
;
903 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, txfifo_op_reg
);
904 uart_update_timeout(port
, termios
->c_cflag
, set_baud
);
905 sirfsoc_uart_start_rx(port
);
906 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, SIRFUART_TX_EN
| SIRFUART_RX_EN
);
907 spin_unlock_irqrestore(&port
->lock
, flags
);
910 static void sirfsoc_uart_pm(struct uart_port
*port
, unsigned int state
,
911 unsigned int oldstate
)
913 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
915 clk_prepare_enable(sirfport
->clk
);
917 clk_disable_unprepare(sirfport
->clk
);
920 static int sirfsoc_uart_startup(struct uart_port
*port
)
922 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
923 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
924 unsigned int index
= port
->line
;
926 irq_modify_status(port
->irq
, IRQ_NOREQUEST
, IRQ_NOAUTOEN
);
927 ret
= request_irq(port
->irq
,
933 dev_err(port
->dev
, "UART%d request IRQ line (%d) failed.\n",
937 /* initial hardware settings */
938 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
939 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
) |
941 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
942 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) |
944 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
945 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) &
946 ~SIRFUART_RX_DMA_FLUSH
);
947 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_len
, 0);
948 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_len
, 0);
949 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, SIRFUART_RX_EN
| SIRFUART_TX_EN
);
950 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
)
951 wr_regl(port
, ureg
->sirfsoc_mode1
,
952 SIRFSOC_USP_ENDIAN_CTRL_LSBF
|
954 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_RESET
);
955 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
956 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
957 wr_regl(port
, ureg
->sirfsoc_tx_fifo_ctrl
, SIRFUART_FIFO_THD(port
));
958 wr_regl(port
, ureg
->sirfsoc_rx_fifo_ctrl
, SIRFUART_FIFO_THD(port
));
959 if (sirfport
->rx_dma_chan
)
960 wr_regl(port
, ureg
->sirfsoc_rx_fifo_level_chk
,
961 SIRFUART_RX_FIFO_CHK_SC(port
->line
, 0x4) |
962 SIRFUART_RX_FIFO_CHK_LC(port
->line
, 0xe) |
963 SIRFUART_RX_FIFO_CHK_HC(port
->line
, 0x1b));
964 if (sirfport
->tx_dma_chan
) {
965 sirfport
->tx_dma_state
= TX_DMA_IDLE
;
966 wr_regl(port
, ureg
->sirfsoc_tx_fifo_level_chk
,
967 SIRFUART_TX_FIFO_CHK_SC(port
->line
, 0x1b) |
968 SIRFUART_TX_FIFO_CHK_LC(port
->line
, 0xe) |
969 SIRFUART_TX_FIFO_CHK_HC(port
->line
, 0x4));
971 sirfport
->ms_enabled
= false;
972 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
&&
973 sirfport
->hw_flow_ctrl
) {
974 irq_modify_status(gpio_to_irq(sirfport
->cts_gpio
),
975 IRQ_NOREQUEST
, IRQ_NOAUTOEN
);
976 ret
= request_irq(gpio_to_irq(sirfport
->cts_gpio
),
977 sirfsoc_uart_usp_cts_handler
, IRQF_TRIGGER_FALLING
|
978 IRQF_TRIGGER_RISING
, "usp_cts_irq", sirfport
);
980 dev_err(port
->dev
, "UART-USP:request gpio irq fail\n");
984 enable_irq(port
->irq
);
985 if (sirfport
->rx_dma_chan
&& !sirfport
->is_hrt_enabled
) {
986 sirfport
->is_hrt_enabled
= true;
987 sirfport
->rx_period_time
= 20000000;
988 sirfport
->rx_dma_items
.xmit
.tail
=
989 sirfport
->rx_dma_items
.xmit
.head
= 0;
990 hrtimer_start(&sirfport
->hrt
,
991 ns_to_ktime(sirfport
->rx_period_time
),
997 free_irq(port
->irq
, sirfport
);
1002 static void sirfsoc_uart_shutdown(struct uart_port
*port
)
1004 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1005 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
1006 if (!sirfport
->is_atlas7
)
1007 wr_regl(port
, ureg
->sirfsoc_int_en_reg
, 0);
1009 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
, ~0UL);
1011 free_irq(port
->irq
, sirfport
);
1012 if (sirfport
->ms_enabled
)
1013 sirfsoc_uart_disable_ms(port
);
1014 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
&&
1015 sirfport
->hw_flow_ctrl
) {
1016 gpio_set_value(sirfport
->rts_gpio
, 1);
1017 free_irq(gpio_to_irq(sirfport
->cts_gpio
), sirfport
);
1019 if (sirfport
->tx_dma_chan
)
1020 sirfport
->tx_dma_state
= TX_DMA_IDLE
;
1021 if (sirfport
->rx_dma_chan
&& sirfport
->is_hrt_enabled
) {
1022 while ((rd_regl(port
, ureg
->sirfsoc_rx_fifo_status
) &
1023 SIRFUART_RX_FIFO_MASK
) > 0)
1025 sirfport
->is_hrt_enabled
= false;
1026 hrtimer_cancel(&sirfport
->hrt
);
1030 static const char *sirfsoc_uart_type(struct uart_port
*port
)
1032 return port
->type
== SIRFSOC_PORT_TYPE
? SIRFUART_PORT_NAME
: NULL
;
1035 static int sirfsoc_uart_request_port(struct uart_port
*port
)
1037 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1038 struct sirfsoc_uart_param
*uart_param
= &sirfport
->uart_reg
->uart_param
;
1040 ret
= request_mem_region(port
->mapbase
,
1041 SIRFUART_MAP_SIZE
, uart_param
->port_name
);
1042 return ret
? 0 : -EBUSY
;
1045 static void sirfsoc_uart_release_port(struct uart_port
*port
)
1047 release_mem_region(port
->mapbase
, SIRFUART_MAP_SIZE
);
1050 static void sirfsoc_uart_config_port(struct uart_port
*port
, int flags
)
1052 if (flags
& UART_CONFIG_TYPE
) {
1053 port
->type
= SIRFSOC_PORT_TYPE
;
1054 sirfsoc_uart_request_port(port
);
1058 static struct uart_ops sirfsoc_uart_ops
= {
1059 .tx_empty
= sirfsoc_uart_tx_empty
,
1060 .get_mctrl
= sirfsoc_uart_get_mctrl
,
1061 .set_mctrl
= sirfsoc_uart_set_mctrl
,
1062 .stop_tx
= sirfsoc_uart_stop_tx
,
1063 .start_tx
= sirfsoc_uart_start_tx
,
1064 .stop_rx
= sirfsoc_uart_stop_rx
,
1065 .enable_ms
= sirfsoc_uart_enable_ms
,
1066 .break_ctl
= sirfsoc_uart_break_ctl
,
1067 .startup
= sirfsoc_uart_startup
,
1068 .shutdown
= sirfsoc_uart_shutdown
,
1069 .set_termios
= sirfsoc_uart_set_termios
,
1070 .pm
= sirfsoc_uart_pm
,
1071 .type
= sirfsoc_uart_type
,
1072 .release_port
= sirfsoc_uart_release_port
,
1073 .request_port
= sirfsoc_uart_request_port
,
1074 .config_port
= sirfsoc_uart_config_port
,
1077 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
1079 sirfsoc_uart_console_setup(struct console
*co
, char *options
)
1081 unsigned int baud
= 115200;
1082 unsigned int bits
= 8;
1083 unsigned int parity
= 'n';
1084 unsigned int flow
= 'n';
1085 struct sirfsoc_uart_port
*sirfport
;
1086 struct sirfsoc_register
*ureg
;
1087 if (co
->index
< 0 || co
->index
>= SIRFSOC_UART_NR
)
1089 sirfport
= sirf_ports
[co
->index
];
1092 ureg
= &sirfport
->uart_reg
->uart_reg
;
1093 if (!sirfport
->port
.mapbase
)
1096 /* enable usp in mode1 register */
1097 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
)
1098 wr_regl(&sirfport
->port
, ureg
->sirfsoc_mode1
, SIRFSOC_USP_EN
|
1099 SIRFSOC_USP_ENDIAN_CTRL_LSBF
);
1101 uart_parse_options(options
, &baud
, &parity
, &bits
, &flow
);
1102 sirfport
->port
.cons
= co
;
1104 /* default console tx/rx transfer using io mode */
1105 sirfport
->rx_dma_chan
= NULL
;
1106 sirfport
->tx_dma_chan
= NULL
;
1107 return uart_set_options(&sirfport
->port
, co
, baud
, parity
, bits
, flow
);
1110 static void sirfsoc_uart_console_putchar(struct uart_port
*port
, int ch
)
1112 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1113 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
1114 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
1115 while (rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
1116 ufifo_st
->ff_full(port
))
1118 wr_regl(port
, ureg
->sirfsoc_tx_fifo_data
, ch
);
1121 static void sirfsoc_uart_console_write(struct console
*co
, const char *s
,
1124 struct sirfsoc_uart_port
*sirfport
= sirf_ports
[co
->index
];
1126 uart_console_write(&sirfport
->port
, s
, count
,
1127 sirfsoc_uart_console_putchar
);
1130 static struct console sirfsoc_uart_console
= {
1131 .name
= SIRFSOC_UART_NAME
,
1132 .device
= uart_console_device
,
1133 .flags
= CON_PRINTBUFFER
,
1135 .write
= sirfsoc_uart_console_write
,
1136 .setup
= sirfsoc_uart_console_setup
,
1137 .data
= &sirfsoc_uart_drv
,
1140 static int __init
sirfsoc_uart_console_init(void)
1142 register_console(&sirfsoc_uart_console
);
1145 console_initcall(sirfsoc_uart_console_init
);
1148 static struct uart_driver sirfsoc_uart_drv
= {
1149 .owner
= THIS_MODULE
,
1150 .driver_name
= SIRFUART_PORT_NAME
,
1151 .nr
= SIRFSOC_UART_NR
,
1152 .dev_name
= SIRFSOC_UART_NAME
,
1153 .major
= SIRFSOC_UART_MAJOR
,
1154 .minor
= SIRFSOC_UART_MINOR
,
1155 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
1156 .cons
= &sirfsoc_uart_console
,
1162 static enum hrtimer_restart
1163 sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer
*hrt
)
1165 struct sirfsoc_uart_port
*sirfport
;
1166 struct uart_port
*port
;
1167 int count
, inserted
;
1168 struct dma_tx_state tx_state
;
1169 struct tty_struct
*tty
;
1170 struct sirfsoc_register
*ureg
;
1171 struct circ_buf
*xmit
;
1173 sirfport
= container_of(hrt
, struct sirfsoc_uart_port
, hrt
);
1174 port
= &sirfport
->port
;
1176 tty
= port
->state
->port
.tty
;
1177 ureg
= &sirfport
->uart_reg
->uart_reg
;
1178 xmit
= &sirfport
->rx_dma_items
.xmit
;
1179 dmaengine_tx_status(sirfport
->rx_dma_chan
,
1180 sirfport
->rx_dma_items
.cookie
, &tx_state
);
1181 xmit
->head
= SIRFSOC_RX_DMA_BUF_SIZE
- tx_state
.residue
;
1182 count
= CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
,
1183 SIRFSOC_RX_DMA_BUF_SIZE
);
1185 inserted
= tty_insert_flip_string(tty
->port
,
1186 (const unsigned char *)&xmit
->buf
[xmit
->tail
], count
);
1189 port
->icount
.rx
+= inserted
;
1190 xmit
->tail
= (xmit
->tail
+ inserted
) &
1191 (SIRFSOC_RX_DMA_BUF_SIZE
- 1);
1192 count
= CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
,
1193 SIRFSOC_RX_DMA_BUF_SIZE
);
1194 tty_flip_buffer_push(tty
->port
);
1197 * if RX DMA buffer data have all push into tty buffer, and there is
1198 * only little data(less than a dma transfer unit) left in rxfifo,
1199 * fetch it out in pio mode and switch back to dma immediately
1201 if (!inserted
&& !count
&&
1202 ((rd_regl(port
, ureg
->sirfsoc_rx_fifo_status
) &
1203 SIRFUART_RX_FIFO_MASK
) > 0)) {
1204 /* switch to pio mode */
1205 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
1206 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) |
1208 while ((rd_regl(port
, ureg
->sirfsoc_rx_fifo_status
) &
1209 SIRFUART_RX_FIFO_MASK
) > 0) {
1210 if (sirfsoc_uart_pio_rx_chars(port
, 16) > 0)
1211 tty_flip_buffer_push(tty
->port
);
1213 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
1214 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
1215 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_START
);
1216 /* switch back to dma mode */
1217 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
1218 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) &
1222 hrtimer_forward_now(hrt
, ns_to_ktime(sirfport
->rx_period_time
));
1223 return HRTIMER_RESTART
;
1226 static struct of_device_id sirfsoc_uart_ids
[] = {
1227 { .compatible
= "sirf,prima2-uart", .data
= &sirfsoc_uart
,},
1228 { .compatible
= "sirf,atlas7-uart", .data
= &sirfsoc_uart
},
1229 { .compatible
= "sirf,prima2-usp-uart", .data
= &sirfsoc_usp
},
1230 { .compatible
= "sirf,atlas7-usp-uart", .data
= &sirfsoc_usp
},
1233 MODULE_DEVICE_TABLE(of
, sirfsoc_uart_ids
);
1235 static int sirfsoc_uart_probe(struct platform_device
*pdev
)
1237 struct sirfsoc_uart_port
*sirfport
;
1238 struct uart_port
*port
;
1239 struct resource
*res
;
1241 struct dma_slave_config slv_cfg
= {
1244 struct dma_slave_config tx_slv_cfg
= {
1247 const struct of_device_id
*match
;
1249 match
= of_match_node(sirfsoc_uart_ids
, pdev
->dev
.of_node
);
1250 sirfport
= devm_kzalloc(&pdev
->dev
, sizeof(*sirfport
), GFP_KERNEL
);
1255 sirfport
->port
.line
= of_alias_get_id(pdev
->dev
.of_node
, "serial");
1256 sirf_ports
[sirfport
->port
.line
] = sirfport
;
1257 sirfport
->port
.iotype
= UPIO_MEM
;
1258 sirfport
->port
.flags
= UPF_BOOT_AUTOCONF
;
1259 port
= &sirfport
->port
;
1260 port
->dev
= &pdev
->dev
;
1261 port
->private_data
= sirfport
;
1262 sirfport
->uart_reg
= (struct sirfsoc_uart_register
*)match
->data
;
1264 sirfport
->hw_flow_ctrl
= of_property_read_bool(pdev
->dev
.of_node
,
1265 "sirf,uart-has-rtscts");
1266 if (of_device_is_compatible(pdev
->dev
.of_node
, "sirf,prima2-uart") ||
1267 of_device_is_compatible(pdev
->dev
.of_node
, "sirf,atlas7-uart"))
1268 sirfport
->uart_reg
->uart_type
= SIRF_REAL_UART
;
1269 if (of_device_is_compatible(pdev
->dev
.of_node
,
1270 "sirf,prima2-usp-uart") || of_device_is_compatible(
1271 pdev
->dev
.of_node
, "sirf,atlas7-usp-uart")) {
1272 sirfport
->uart_reg
->uart_type
= SIRF_USP_UART
;
1273 if (!sirfport
->hw_flow_ctrl
)
1274 goto usp_no_flow_control
;
1275 if (of_find_property(pdev
->dev
.of_node
, "cts-gpios", NULL
))
1276 sirfport
->cts_gpio
= of_get_named_gpio(
1277 pdev
->dev
.of_node
, "cts-gpios", 0);
1279 sirfport
->cts_gpio
= -1;
1280 if (of_find_property(pdev
->dev
.of_node
, "rts-gpios", NULL
))
1281 sirfport
->rts_gpio
= of_get_named_gpio(
1282 pdev
->dev
.of_node
, "rts-gpios", 0);
1284 sirfport
->rts_gpio
= -1;
1286 if ((!gpio_is_valid(sirfport
->cts_gpio
) ||
1287 !gpio_is_valid(sirfport
->rts_gpio
))) {
1290 "Usp flow control must have cts and rts gpio");
1293 ret
= devm_gpio_request(&pdev
->dev
, sirfport
->cts_gpio
,
1296 dev_err(&pdev
->dev
, "Unable request cts gpio");
1299 gpio_direction_input(sirfport
->cts_gpio
);
1300 ret
= devm_gpio_request(&pdev
->dev
, sirfport
->rts_gpio
,
1303 dev_err(&pdev
->dev
, "Unable request rts gpio");
1306 gpio_direction_output(sirfport
->rts_gpio
, 1);
1308 usp_no_flow_control
:
1309 if (of_device_is_compatible(pdev
->dev
.of_node
, "sirf,atlas7-uart") ||
1310 of_device_is_compatible(pdev
->dev
.of_node
, "sirf,atlas7-usp-uart"))
1311 sirfport
->is_atlas7
= true;
1313 if (of_property_read_u32(pdev
->dev
.of_node
,
1317 "Unable to find fifosize in uart node.\n");
1322 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1324 dev_err(&pdev
->dev
, "Insufficient resources.\n");
1328 port
->mapbase
= res
->start
;
1329 port
->membase
= devm_ioremap(&pdev
->dev
,
1330 res
->start
, resource_size(res
));
1331 if (!port
->membase
) {
1332 dev_err(&pdev
->dev
, "Cannot remap resource.\n");
1336 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
1338 dev_err(&pdev
->dev
, "Insufficient resources.\n");
1342 port
->irq
= res
->start
;
1344 sirfport
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
1345 if (IS_ERR(sirfport
->clk
)) {
1346 ret
= PTR_ERR(sirfport
->clk
);
1349 port
->uartclk
= clk_get_rate(sirfport
->clk
);
1351 port
->ops
= &sirfsoc_uart_ops
;
1352 spin_lock_init(&port
->lock
);
1354 platform_set_drvdata(pdev
, sirfport
);
1355 ret
= uart_add_one_port(&sirfsoc_uart_drv
, port
);
1357 dev_err(&pdev
->dev
, "Cannot add UART port(%d).\n", pdev
->id
);
1361 sirfport
->rx_dma_chan
= dma_request_slave_channel(port
->dev
, "rx");
1362 sirfport
->rx_dma_items
.xmit
.buf
=
1363 dma_alloc_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1364 &sirfport
->rx_dma_items
.dma_addr
, GFP_KERNEL
);
1365 if (!sirfport
->rx_dma_items
.xmit
.buf
) {
1366 dev_err(port
->dev
, "Uart alloc bufa failed\n");
1368 goto alloc_coherent_err
;
1370 sirfport
->rx_dma_items
.xmit
.head
=
1371 sirfport
->rx_dma_items
.xmit
.tail
= 0;
1372 if (sirfport
->rx_dma_chan
)
1373 dmaengine_slave_config(sirfport
->rx_dma_chan
, &slv_cfg
);
1374 sirfport
->tx_dma_chan
= dma_request_slave_channel(port
->dev
, "tx");
1375 if (sirfport
->tx_dma_chan
)
1376 dmaengine_slave_config(sirfport
->tx_dma_chan
, &tx_slv_cfg
);
1377 if (sirfport
->rx_dma_chan
) {
1378 hrtimer_init(&sirfport
->hrt
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
1379 sirfport
->hrt
.function
= sirfsoc_uart_rx_dma_hrtimer_callback
;
1380 sirfport
->is_hrt_enabled
= false;
1385 dma_free_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1386 sirfport
->rx_dma_items
.xmit
.buf
,
1387 sirfport
->rx_dma_items
.dma_addr
);
1388 dma_release_channel(sirfport
->rx_dma_chan
);
1393 static int sirfsoc_uart_remove(struct platform_device
*pdev
)
1395 struct sirfsoc_uart_port
*sirfport
= platform_get_drvdata(pdev
);
1396 struct uart_port
*port
= &sirfport
->port
;
1397 uart_remove_one_port(&sirfsoc_uart_drv
, port
);
1398 if (sirfport
->rx_dma_chan
) {
1399 dmaengine_terminate_all(sirfport
->rx_dma_chan
);
1400 dma_release_channel(sirfport
->rx_dma_chan
);
1401 dma_free_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1402 sirfport
->rx_dma_items
.xmit
.buf
,
1403 sirfport
->rx_dma_items
.dma_addr
);
1405 if (sirfport
->tx_dma_chan
) {
1406 dmaengine_terminate_all(sirfport
->tx_dma_chan
);
1407 dma_release_channel(sirfport
->tx_dma_chan
);
1412 #ifdef CONFIG_PM_SLEEP
1414 sirfsoc_uart_suspend(struct device
*pdev
)
1416 struct sirfsoc_uart_port
*sirfport
= dev_get_drvdata(pdev
);
1417 struct uart_port
*port
= &sirfport
->port
;
1418 uart_suspend_port(&sirfsoc_uart_drv
, port
);
1422 static int sirfsoc_uart_resume(struct device
*pdev
)
1424 struct sirfsoc_uart_port
*sirfport
= dev_get_drvdata(pdev
);
1425 struct uart_port
*port
= &sirfport
->port
;
1426 uart_resume_port(&sirfsoc_uart_drv
, port
);
1431 static const struct dev_pm_ops sirfsoc_uart_pm_ops
= {
1432 SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend
, sirfsoc_uart_resume
)
1435 static struct platform_driver sirfsoc_uart_driver
= {
1436 .probe
= sirfsoc_uart_probe
,
1437 .remove
= sirfsoc_uart_remove
,
1439 .name
= SIRFUART_PORT_NAME
,
1440 .of_match_table
= sirfsoc_uart_ids
,
1441 .pm
= &sirfsoc_uart_pm_ops
,
1445 static int __init
sirfsoc_uart_init(void)
1449 ret
= uart_register_driver(&sirfsoc_uart_drv
);
1453 ret
= platform_driver_register(&sirfsoc_uart_driver
);
1455 uart_unregister_driver(&sirfsoc_uart_drv
);
1459 module_init(sirfsoc_uart_init
);
1461 static void __exit
sirfsoc_uart_exit(void)
1463 platform_driver_unregister(&sirfsoc_uart_driver
);
1464 uart_unregister_driver(&sirfsoc_uart_drv
);
1466 module_exit(sirfsoc_uart_exit
);
1468 MODULE_LICENSE("GPL v2");
1469 MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
1470 MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");