// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for CSR SiRFprimaII onboard UARTs.
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 */
8 #include <linux/module.h>
9 #include <linux/ioport.h>
10 #include <linux/platform_device.h>
11 #include <linux/init.h>
12 #include <linux/sysrq.h>
13 #include <linux/console.h>
14 #include <linux/tty.h>
15 #include <linux/tty_flip.h>
16 #include <linux/serial_core.h>
17 #include <linux/serial.h>
18 #include <linux/clk.h>
20 #include <linux/slab.h>
22 #include <linux/of_gpio.h>
23 #include <linux/dmaengine.h>
24 #include <linux/dma-direction.h>
25 #include <linux/dma-mapping.h>
27 #include <asm/mach/irq.h>
29 #include "sirfsoc_uart.h"
32 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port
*sirfport
, int count
);
34 sirfsoc_uart_pio_rx_chars(struct uart_port
*port
, unsigned int max_rx_count
);
35 static struct uart_driver sirfsoc_uart_drv
;
37 static void sirfsoc_uart_tx_dma_complete_callback(void *param
);
38 static const struct sirfsoc_baudrate_to_regv baudrate_to_regv
[] = {
59 static struct sirfsoc_uart_port
*sirf_ports
[SIRFSOC_UART_NR
];
61 static inline struct sirfsoc_uart_port
*to_sirfport(struct uart_port
*port
)
63 return container_of(port
, struct sirfsoc_uart_port
, port
);
66 static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port
*port
)
69 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
70 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
71 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
72 reg
= rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
);
73 return (reg
& ufifo_st
->ff_empty(port
)) ? TIOCSER_TEMT
: 0;
76 static unsigned int sirfsoc_uart_get_mctrl(struct uart_port
*port
)
78 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
79 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
80 if (!sirfport
->hw_flow_ctrl
|| !sirfport
->ms_enabled
)
82 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
83 if (!(rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) &
84 SIRFUART_AFC_CTS_STATUS
))
89 if (!gpio_get_value(sirfport
->cts_gpio
))
95 return TIOCM_CAR
| TIOCM_DSR
;
97 return TIOCM_CAR
| TIOCM_DSR
| TIOCM_CTS
;
100 static void sirfsoc_uart_set_mctrl(struct uart_port
*port
, unsigned int mctrl
)
102 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
103 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
104 unsigned int assert = mctrl
& TIOCM_RTS
;
105 unsigned int val
= assert ? SIRFUART_AFC_CTRL_RX_THD
: 0x0;
106 unsigned int current_val
;
108 if (mctrl
& TIOCM_LOOP
) {
109 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
)
110 wr_regl(port
, ureg
->sirfsoc_line_ctrl
,
111 rd_regl(port
, ureg
->sirfsoc_line_ctrl
) |
114 wr_regl(port
, ureg
->sirfsoc_mode1
,
115 rd_regl(port
, ureg
->sirfsoc_mode1
) |
116 SIRFSOC_USP_LOOP_BACK_CTRL
);
118 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
)
119 wr_regl(port
, ureg
->sirfsoc_line_ctrl
,
120 rd_regl(port
, ureg
->sirfsoc_line_ctrl
) &
121 ~SIRFUART_LOOP_BACK
);
123 wr_regl(port
, ureg
->sirfsoc_mode1
,
124 rd_regl(port
, ureg
->sirfsoc_mode1
) &
125 ~SIRFSOC_USP_LOOP_BACK_CTRL
);
128 if (!sirfport
->hw_flow_ctrl
|| !sirfport
->ms_enabled
)
130 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
131 current_val
= rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) & ~0xFF;
133 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
, val
);
136 gpio_set_value(sirfport
->rts_gpio
, 1);
138 gpio_set_value(sirfport
->rts_gpio
, 0);
142 static void sirfsoc_uart_stop_tx(struct uart_port
*port
)
144 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
145 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
146 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
148 if (sirfport
->tx_dma_chan
) {
149 if (sirfport
->tx_dma_state
== TX_DMA_RUNNING
) {
150 dmaengine_pause(sirfport
->tx_dma_chan
);
151 sirfport
->tx_dma_state
= TX_DMA_PAUSE
;
153 if (!sirfport
->is_atlas7
)
154 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
155 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
156 ~uint_en
->sirfsoc_txfifo_empty_en
);
158 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
159 uint_en
->sirfsoc_txfifo_empty_en
);
162 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
)
163 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, rd_regl(port
,
164 ureg
->sirfsoc_tx_rx_en
) & ~SIRFUART_TX_EN
);
165 if (!sirfport
->is_atlas7
)
166 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
167 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
168 ~uint_en
->sirfsoc_txfifo_empty_en
);
170 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
171 uint_en
->sirfsoc_txfifo_empty_en
);
175 static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port
*sirfport
)
177 struct uart_port
*port
= &sirfport
->port
;
178 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
179 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
180 struct circ_buf
*xmit
= &port
->state
->xmit
;
181 unsigned long tran_size
;
182 unsigned long tran_start
;
183 unsigned long pio_tx_size
;
185 tran_size
= CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
, UART_XMIT_SIZE
);
186 tran_start
= (unsigned long)(xmit
->buf
+ xmit
->tail
);
187 if (uart_circ_empty(xmit
) || uart_tx_stopped(port
) ||
190 if (sirfport
->tx_dma_state
== TX_DMA_PAUSE
) {
191 dmaengine_resume(sirfport
->tx_dma_chan
);
194 if (sirfport
->tx_dma_state
== TX_DMA_RUNNING
)
196 if (!sirfport
->is_atlas7
)
197 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
198 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
199 ~(uint_en
->sirfsoc_txfifo_empty_en
));
201 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
202 uint_en
->sirfsoc_txfifo_empty_en
);
204 * DMA requires buffer address and buffer length are both aligned with
205 * 4 bytes, so we use PIO for
206 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
207 * bytes, and move to DMA for the left part aligned with 4bytes
208 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
209 * part first, move to PIO for the left 1~3 bytes
211 if (tran_size
< 4 || BYTES_TO_ALIGN(tran_start
)) {
212 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
213 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
214 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
)|
216 if (BYTES_TO_ALIGN(tran_start
)) {
217 pio_tx_size
= sirfsoc_uart_pio_tx_chars(sirfport
,
218 BYTES_TO_ALIGN(tran_start
));
219 tran_size
-= pio_tx_size
;
222 sirfsoc_uart_pio_tx_chars(sirfport
, tran_size
);
223 if (!sirfport
->is_atlas7
)
224 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
225 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)|
226 uint_en
->sirfsoc_txfifo_empty_en
);
228 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
229 uint_en
->sirfsoc_txfifo_empty_en
);
230 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
232 /* tx transfer mode switch into dma mode */
233 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
234 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
235 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
)&
237 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
240 sirfport
->tx_dma_addr
= dma_map_single(port
->dev
,
241 xmit
->buf
+ xmit
->tail
,
242 tran_size
, DMA_TO_DEVICE
);
243 sirfport
->tx_dma_desc
= dmaengine_prep_slave_single(
244 sirfport
->tx_dma_chan
, sirfport
->tx_dma_addr
,
245 tran_size
, DMA_MEM_TO_DEV
, DMA_PREP_INTERRUPT
);
246 if (!sirfport
->tx_dma_desc
) {
247 dev_err(port
->dev
, "DMA prep slave single fail\n");
250 sirfport
->tx_dma_desc
->callback
=
251 sirfsoc_uart_tx_dma_complete_callback
;
252 sirfport
->tx_dma_desc
->callback_param
= (void *)sirfport
;
253 sirfport
->transfer_size
= tran_size
;
255 dmaengine_submit(sirfport
->tx_dma_desc
);
256 dma_async_issue_pending(sirfport
->tx_dma_chan
);
257 sirfport
->tx_dma_state
= TX_DMA_RUNNING
;
261 static void sirfsoc_uart_start_tx(struct uart_port
*port
)
263 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
264 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
265 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
266 if (sirfport
->tx_dma_chan
)
267 sirfsoc_uart_tx_with_dma(sirfport
);
269 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
)
270 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, rd_regl(port
,
271 ureg
->sirfsoc_tx_rx_en
) | SIRFUART_TX_EN
);
272 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
273 sirfsoc_uart_pio_tx_chars(sirfport
, port
->fifosize
);
274 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
275 if (!sirfport
->is_atlas7
)
276 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
277 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)|
278 uint_en
->sirfsoc_txfifo_empty_en
);
280 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
281 uint_en
->sirfsoc_txfifo_empty_en
);
285 static void sirfsoc_uart_stop_rx(struct uart_port
*port
)
287 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
288 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
289 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
291 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
292 if (sirfport
->rx_dma_chan
) {
293 if (!sirfport
->is_atlas7
)
294 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
295 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
296 ~(SIRFUART_RX_DMA_INT_EN(uint_en
,
297 sirfport
->uart_reg
->uart_type
) |
298 uint_en
->sirfsoc_rx_done_en
));
300 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
301 SIRFUART_RX_DMA_INT_EN(uint_en
,
302 sirfport
->uart_reg
->uart_type
)|
303 uint_en
->sirfsoc_rx_done_en
);
304 dmaengine_terminate_all(sirfport
->rx_dma_chan
);
306 if (!sirfport
->is_atlas7
)
307 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
308 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
309 ~(SIRFUART_RX_IO_INT_EN(uint_en
,
310 sirfport
->uart_reg
->uart_type
)));
312 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
313 SIRFUART_RX_IO_INT_EN(uint_en
,
314 sirfport
->uart_reg
->uart_type
));
318 static void sirfsoc_uart_disable_ms(struct uart_port
*port
)
320 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
321 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
322 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
324 if (!sirfport
->hw_flow_ctrl
)
326 sirfport
->ms_enabled
= false;
327 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
328 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
,
329 rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) & ~0x3FF);
330 if (!sirfport
->is_atlas7
)
331 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
332 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
333 ~uint_en
->sirfsoc_cts_en
);
335 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
336 uint_en
->sirfsoc_cts_en
);
338 disable_irq(gpio_to_irq(sirfport
->cts_gpio
));
341 static irqreturn_t
sirfsoc_uart_usp_cts_handler(int irq
, void *dev_id
)
343 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)dev_id
;
344 struct uart_port
*port
= &sirfport
->port
;
345 spin_lock(&port
->lock
);
346 if (gpio_is_valid(sirfport
->cts_gpio
) && sirfport
->ms_enabled
)
347 uart_handle_cts_change(port
,
348 !gpio_get_value(sirfport
->cts_gpio
));
349 spin_unlock(&port
->lock
);
353 static void sirfsoc_uart_enable_ms(struct uart_port
*port
)
355 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
356 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
357 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
359 if (!sirfport
->hw_flow_ctrl
)
361 sirfport
->ms_enabled
= true;
362 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
363 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
,
364 rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) |
365 SIRFUART_AFC_TX_EN
| SIRFUART_AFC_RX_EN
|
366 SIRFUART_AFC_CTRL_RX_THD
);
367 if (!sirfport
->is_atlas7
)
368 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
369 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
370 | uint_en
->sirfsoc_cts_en
);
372 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
373 uint_en
->sirfsoc_cts_en
);
375 enable_irq(gpio_to_irq(sirfport
->cts_gpio
));
378 static void sirfsoc_uart_break_ctl(struct uart_port
*port
, int break_state
)
380 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
381 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
382 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
383 unsigned long ulcon
= rd_regl(port
, ureg
->sirfsoc_line_ctrl
);
385 ulcon
|= SIRFUART_SET_BREAK
;
387 ulcon
&= ~SIRFUART_SET_BREAK
;
388 wr_regl(port
, ureg
->sirfsoc_line_ctrl
, ulcon
);
393 sirfsoc_uart_pio_rx_chars(struct uart_port
*port
, unsigned int max_rx_count
)
395 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
396 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
397 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
398 unsigned int ch
, rx_count
= 0;
399 struct tty_struct
*tty
;
400 tty
= tty_port_tty_get(&port
->state
->port
);
403 while (!(rd_regl(port
, ureg
->sirfsoc_rx_fifo_status
) &
404 ufifo_st
->ff_empty(port
))) {
405 ch
= rd_regl(port
, ureg
->sirfsoc_rx_fifo_data
) |
407 if (unlikely(uart_handle_sysrq_char(port
, ch
)))
409 uart_insert_char(port
, 0, 0, ch
, TTY_NORMAL
);
411 if (rx_count
>= max_rx_count
)
415 port
->icount
.rx
+= rx_count
;
421 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port
*sirfport
, int count
)
423 struct uart_port
*port
= &sirfport
->port
;
424 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
425 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
426 struct circ_buf
*xmit
= &port
->state
->xmit
;
427 unsigned int num_tx
= 0;
428 while (!uart_circ_empty(xmit
) &&
429 !(rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
430 ufifo_st
->ff_full(port
)) &&
432 wr_regl(port
, ureg
->sirfsoc_tx_fifo_data
,
433 xmit
->buf
[xmit
->tail
]);
434 xmit
->tail
= (xmit
->tail
+ 1) & (UART_XMIT_SIZE
- 1);
438 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
439 uart_write_wakeup(port
);
443 static void sirfsoc_uart_tx_dma_complete_callback(void *param
)
445 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
446 struct uart_port
*port
= &sirfport
->port
;
447 struct circ_buf
*xmit
= &port
->state
->xmit
;
450 spin_lock_irqsave(&port
->lock
, flags
);
451 xmit
->tail
= (xmit
->tail
+ sirfport
->transfer_size
) &
452 (UART_XMIT_SIZE
- 1);
453 port
->icount
.tx
+= sirfport
->transfer_size
;
454 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
455 uart_write_wakeup(port
);
456 if (sirfport
->tx_dma_addr
)
457 dma_unmap_single(port
->dev
, sirfport
->tx_dma_addr
,
458 sirfport
->transfer_size
, DMA_TO_DEVICE
);
459 sirfport
->tx_dma_state
= TX_DMA_IDLE
;
460 sirfsoc_uart_tx_with_dma(sirfport
);
461 spin_unlock_irqrestore(&port
->lock
, flags
);
464 static irqreturn_t
sirfsoc_uart_isr(int irq
, void *dev_id
)
466 unsigned long intr_status
;
467 unsigned long cts_status
;
468 unsigned long flag
= TTY_NORMAL
;
469 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)dev_id
;
470 struct uart_port
*port
= &sirfport
->port
;
471 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
472 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
473 struct sirfsoc_int_status
*uint_st
= &sirfport
->uart_reg
->uart_int_st
;
474 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
475 struct uart_state
*state
= port
->state
;
476 struct circ_buf
*xmit
= &port
->state
->xmit
;
477 spin_lock(&port
->lock
);
478 intr_status
= rd_regl(port
, ureg
->sirfsoc_int_st_reg
);
479 wr_regl(port
, ureg
->sirfsoc_int_st_reg
, intr_status
);
480 intr_status
&= rd_regl(port
, ureg
->sirfsoc_int_en_reg
);
481 if (unlikely(intr_status
& (SIRFUART_ERR_INT_STAT(uint_st
,
482 sirfport
->uart_reg
->uart_type
)))) {
483 if (intr_status
& uint_st
->sirfsoc_rxd_brk
) {
485 if (uart_handle_break(port
))
488 if (intr_status
& uint_st
->sirfsoc_rx_oflow
) {
489 port
->icount
.overrun
++;
492 if (intr_status
& uint_st
->sirfsoc_frm_err
) {
493 port
->icount
.frame
++;
496 if (intr_status
& uint_st
->sirfsoc_parity_err
) {
497 port
->icount
.parity
++;
500 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
501 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
502 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_START
);
503 intr_status
&= port
->read_status_mask
;
504 uart_insert_char(port
, intr_status
,
505 uint_en
->sirfsoc_rx_oflow_en
, 0, flag
);
508 if ((sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) &&
509 (intr_status
& SIRFUART_CTS_INT_ST(uint_st
)) &&
510 !sirfport
->tx_dma_state
) {
511 cts_status
= rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) &
512 SIRFUART_AFC_CTS_STATUS
;
517 uart_handle_cts_change(port
, cts_status
);
518 wake_up_interruptible(&state
->port
.delta_msr_wait
);
520 if (!sirfport
->rx_dma_chan
&&
521 (intr_status
& SIRFUART_RX_IO_INT_ST(uint_st
))) {
523 * chip will trigger continuous RX_TIMEOUT interrupt
524 * in RXFIFO empty and not trigger if RXFIFO recevice
525 * data in limit time, original method use RX_TIMEOUT
526 * will trigger lots of useless interrupt in RXFIFO
527 * empty.RXFIFO received one byte will trigger RX_DONE
528 * interrupt.use RX_DONE to wait for data received
529 * into RXFIFO, use RX_THD/RX_FULL for lots data receive
530 * and use RX_TIMEOUT for the last left data.
532 if (intr_status
& uint_st
->sirfsoc_rx_done
) {
533 if (!sirfport
->is_atlas7
) {
534 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
535 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
536 & ~(uint_en
->sirfsoc_rx_done_en
));
537 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
538 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
539 | (uint_en
->sirfsoc_rx_timeout_en
));
541 wr_regl(port
, ureg
->sirfsoc_int_en_clr_reg
,
542 uint_en
->sirfsoc_rx_done_en
);
543 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
544 uint_en
->sirfsoc_rx_timeout_en
);
547 if (intr_status
& uint_st
->sirfsoc_rx_timeout
) {
548 if (!sirfport
->is_atlas7
) {
549 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
550 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
551 & ~(uint_en
->sirfsoc_rx_timeout_en
));
552 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
553 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
554 | (uint_en
->sirfsoc_rx_done_en
));
557 ureg
->sirfsoc_int_en_clr_reg
,
558 uint_en
->sirfsoc_rx_timeout_en
);
559 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
560 uint_en
->sirfsoc_rx_done_en
);
563 sirfsoc_uart_pio_rx_chars(port
, port
->fifosize
);
566 spin_unlock(&port
->lock
);
567 tty_flip_buffer_push(&state
->port
);
568 spin_lock(&port
->lock
);
569 if (intr_status
& uint_st
->sirfsoc_txfifo_empty
) {
570 if (sirfport
->tx_dma_chan
)
571 sirfsoc_uart_tx_with_dma(sirfport
);
573 if (uart_circ_empty(xmit
) || uart_tx_stopped(port
)) {
574 spin_unlock(&port
->lock
);
577 sirfsoc_uart_pio_tx_chars(sirfport
,
579 if ((uart_circ_empty(xmit
)) &&
580 (rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
581 ufifo_st
->ff_empty(port
)))
582 sirfsoc_uart_stop_tx(port
);
586 spin_unlock(&port
->lock
);
591 static void sirfsoc_uart_rx_dma_complete_callback(void *param
)
595 /* submit rx dma task into dmaengine */
596 static void sirfsoc_uart_start_next_rx_dma(struct uart_port
*port
)
598 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
599 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
600 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
601 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
602 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) &
604 sirfport
->rx_dma_items
.xmit
.tail
=
605 sirfport
->rx_dma_items
.xmit
.head
= 0;
606 sirfport
->rx_dma_items
.desc
=
607 dmaengine_prep_dma_cyclic(sirfport
->rx_dma_chan
,
608 sirfport
->rx_dma_items
.dma_addr
, SIRFSOC_RX_DMA_BUF_SIZE
,
609 SIRFSOC_RX_DMA_BUF_SIZE
/ 2,
610 DMA_DEV_TO_MEM
, DMA_PREP_INTERRUPT
);
611 if (IS_ERR_OR_NULL(sirfport
->rx_dma_items
.desc
)) {
612 dev_err(port
->dev
, "DMA slave single fail\n");
615 sirfport
->rx_dma_items
.desc
->callback
=
616 sirfsoc_uart_rx_dma_complete_callback
;
617 sirfport
->rx_dma_items
.desc
->callback_param
= sirfport
;
618 sirfport
->rx_dma_items
.cookie
=
619 dmaengine_submit(sirfport
->rx_dma_items
.desc
);
620 dma_async_issue_pending(sirfport
->rx_dma_chan
);
621 if (!sirfport
->is_atlas7
)
622 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
623 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
624 SIRFUART_RX_DMA_INT_EN(uint_en
,
625 sirfport
->uart_reg
->uart_type
));
627 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
628 SIRFUART_RX_DMA_INT_EN(uint_en
,
629 sirfport
->uart_reg
->uart_type
));
633 sirfsoc_usp_calc_sample_div(unsigned long set_rate
,
634 unsigned long ioclk_rate
, unsigned long *sample_reg
)
636 unsigned long min_delta
= ~0UL;
637 unsigned short sample_div
;
638 unsigned long ioclk_div
= 0;
639 unsigned long temp_delta
;
641 for (sample_div
= SIRF_USP_MIN_SAMPLE_DIV
;
642 sample_div
<= SIRF_MAX_SAMPLE_DIV
; sample_div
++) {
643 temp_delta
= ioclk_rate
-
644 (ioclk_rate
+ (set_rate
* sample_div
) / 2)
645 / (set_rate
* sample_div
) * set_rate
* sample_div
;
647 temp_delta
= (temp_delta
> 0) ? temp_delta
: -temp_delta
;
648 if (temp_delta
< min_delta
) {
649 ioclk_div
= (2 * ioclk_rate
/
650 (set_rate
* sample_div
) + 1) / 2 - 1;
651 if (ioclk_div
> SIRF_IOCLK_DIV_MAX
)
653 min_delta
= temp_delta
;
654 *sample_reg
= sample_div
;
663 sirfsoc_uart_calc_sample_div(unsigned long baud_rate
,
664 unsigned long ioclk_rate
, unsigned long *set_baud
)
666 unsigned long min_delta
= ~0UL;
667 unsigned short sample_div
;
668 unsigned int regv
= 0;
669 unsigned long ioclk_div
;
670 unsigned long baud_tmp
;
673 for (sample_div
= SIRF_MIN_SAMPLE_DIV
;
674 sample_div
<= SIRF_MAX_SAMPLE_DIV
; sample_div
++) {
675 ioclk_div
= (ioclk_rate
/ (baud_rate
* (sample_div
+ 1))) - 1;
676 if (ioclk_div
> SIRF_IOCLK_DIV_MAX
)
678 baud_tmp
= ioclk_rate
/ ((ioclk_div
+ 1) * (sample_div
+ 1));
679 temp_delta
= baud_tmp
- baud_rate
;
680 temp_delta
= (temp_delta
> 0) ? temp_delta
: -temp_delta
;
681 if (temp_delta
< min_delta
) {
682 regv
= regv
& (~SIRF_IOCLK_DIV_MASK
);
683 regv
= regv
| ioclk_div
;
684 regv
= regv
& (~SIRF_SAMPLE_DIV_MASK
);
685 regv
= regv
| (sample_div
<< SIRF_SAMPLE_DIV_SHIFT
);
686 min_delta
= temp_delta
;
687 *set_baud
= baud_tmp
;
693 static void sirfsoc_uart_set_termios(struct uart_port
*port
,
694 struct ktermios
*termios
,
695 struct ktermios
*old
)
697 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
698 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
699 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
700 unsigned long config_reg
= 0;
701 unsigned long baud_rate
;
702 unsigned long set_baud
;
705 unsigned int clk_div_reg
= 0;
706 unsigned long txfifo_op_reg
, ioclk_rate
;
707 unsigned long rx_time_out
;
709 u32 data_bit_len
, stop_bit_len
, len_val
;
710 unsigned long sample_div_reg
= 0xf;
711 ioclk_rate
= port
->uartclk
;
713 switch (termios
->c_cflag
& CSIZE
) {
717 config_reg
|= SIRFUART_DATA_BIT_LEN_8
;
721 config_reg
|= SIRFUART_DATA_BIT_LEN_7
;
725 config_reg
|= SIRFUART_DATA_BIT_LEN_6
;
729 config_reg
|= SIRFUART_DATA_BIT_LEN_5
;
732 if (termios
->c_cflag
& CSTOPB
) {
733 config_reg
|= SIRFUART_STOP_BIT_LEN_2
;
738 spin_lock_irqsave(&port
->lock
, flags
);
739 port
->read_status_mask
= uint_en
->sirfsoc_rx_oflow_en
;
740 port
->ignore_status_mask
= 0;
741 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
742 if (termios
->c_iflag
& INPCK
)
743 port
->read_status_mask
|= uint_en
->sirfsoc_frm_err_en
|
744 uint_en
->sirfsoc_parity_err_en
;
746 if (termios
->c_iflag
& INPCK
)
747 port
->read_status_mask
|= uint_en
->sirfsoc_frm_err_en
;
749 if (termios
->c_iflag
& (IGNBRK
| BRKINT
| PARMRK
))
750 port
->read_status_mask
|= uint_en
->sirfsoc_rxd_brk_en
;
751 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
752 if (termios
->c_iflag
& IGNPAR
)
753 port
->ignore_status_mask
|=
754 uint_en
->sirfsoc_frm_err_en
|
755 uint_en
->sirfsoc_parity_err_en
;
756 if (termios
->c_cflag
& PARENB
) {
757 if (termios
->c_cflag
& CMSPAR
) {
758 if (termios
->c_cflag
& PARODD
)
759 config_reg
|= SIRFUART_STICK_BIT_MARK
;
761 config_reg
|= SIRFUART_STICK_BIT_SPACE
;
763 if (termios
->c_cflag
& PARODD
)
764 config_reg
|= SIRFUART_STICK_BIT_ODD
;
766 config_reg
|= SIRFUART_STICK_BIT_EVEN
;
770 if (termios
->c_iflag
& IGNPAR
)
771 port
->ignore_status_mask
|=
772 uint_en
->sirfsoc_frm_err_en
;
773 if (termios
->c_cflag
& PARENB
)
775 "USP-UART not support parity err\n");
777 if (termios
->c_iflag
& IGNBRK
) {
778 port
->ignore_status_mask
|=
779 uint_en
->sirfsoc_rxd_brk_en
;
780 if (termios
->c_iflag
& IGNPAR
)
781 port
->ignore_status_mask
|=
782 uint_en
->sirfsoc_rx_oflow_en
;
784 if ((termios
->c_cflag
& CREAD
) == 0)
785 port
->ignore_status_mask
|= SIRFUART_DUMMY_READ
;
786 /* Hardware Flow Control Settings */
787 if (UART_ENABLE_MS(port
, termios
->c_cflag
)) {
788 if (!sirfport
->ms_enabled
)
789 sirfsoc_uart_enable_ms(port
);
791 if (sirfport
->ms_enabled
)
792 sirfsoc_uart_disable_ms(port
);
794 baud_rate
= uart_get_baud_rate(port
, termios
, old
, 0, 4000000);
795 if (ioclk_rate
== 150000000) {
796 for (ic
= 0; ic
< SIRF_BAUD_RATE_SUPPORT_NR
; ic
++)
797 if (baud_rate
== baudrate_to_regv
[ic
].baud_rate
)
798 clk_div_reg
= baudrate_to_regv
[ic
].reg_val
;
800 set_baud
= baud_rate
;
801 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
802 if (unlikely(clk_div_reg
== 0))
803 clk_div_reg
= sirfsoc_uart_calc_sample_div(baud_rate
,
804 ioclk_rate
, &set_baud
);
805 wr_regl(port
, ureg
->sirfsoc_divisor
, clk_div_reg
);
807 clk_div_reg
= sirfsoc_usp_calc_sample_div(baud_rate
,
808 ioclk_rate
, &sample_div_reg
);
810 set_baud
= ((ioclk_rate
/ (clk_div_reg
+1) - 1) /
811 (sample_div_reg
+ 1));
812 /* setting usp mode 2 */
813 len_val
= ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET
) |
814 (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET
));
815 len_val
|= ((clk_div_reg
& SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK
)
816 << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET
);
817 wr_regl(port
, ureg
->sirfsoc_mode2
, len_val
);
819 if (tty_termios_baud_rate(termios
))
820 tty_termios_encode_baud_rate(termios
, set_baud
, set_baud
);
821 /* set receive timeout && data bits len */
822 rx_time_out
= SIRFSOC_UART_RX_TIMEOUT(set_baud
, 20000);
823 rx_time_out
= SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out
);
824 txfifo_op_reg
= rd_regl(port
, ureg
->sirfsoc_tx_fifo_op
);
825 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
,
826 (txfifo_op_reg
& ~SIRFUART_FIFO_START
));
827 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
828 config_reg
|= SIRFUART_UART_RECV_TIMEOUT(rx_time_out
);
829 wr_regl(port
, ureg
->sirfsoc_line_ctrl
, config_reg
);
832 len_val
= (data_bit_len
- 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET
;
833 len_val
|= (data_bit_len
+ 1 + stop_bit_len
- 1) <<
834 SIRFSOC_USP_TX_FRAME_LEN_OFFSET
;
835 len_val
|= ((data_bit_len
- 1) <<
836 SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET
);
837 len_val
|= (((clk_div_reg
& 0xc00) >> 10) <<
838 SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET
);
839 wr_regl(port
, ureg
->sirfsoc_tx_frame_ctrl
, len_val
);
841 len_val
= (data_bit_len
- 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET
;
842 len_val
|= (data_bit_len
+ 1 + stop_bit_len
- 1) <<
843 SIRFSOC_USP_RX_FRAME_LEN_OFFSET
;
844 len_val
|= (data_bit_len
- 1) <<
845 SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET
;
846 len_val
|= (((clk_div_reg
& 0xf000) >> 12) <<
847 SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET
);
848 wr_regl(port
, ureg
->sirfsoc_rx_frame_ctrl
, len_val
);
850 wr_regl(port
, ureg
->sirfsoc_async_param_reg
,
851 (SIRFUART_USP_RECV_TIMEOUT(rx_time_out
)) |
852 (sample_div_reg
& SIRFSOC_USP_ASYNC_DIV2_MASK
) <<
853 SIRFSOC_USP_ASYNC_DIV2_OFFSET
);
855 if (sirfport
->tx_dma_chan
)
856 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
, SIRFUART_DMA_MODE
);
858 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
, SIRFUART_IO_MODE
);
859 if (sirfport
->rx_dma_chan
)
860 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
861 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) &
864 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
865 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) |
867 sirfport
->rx_period_time
= 20000000;
868 /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
869 if (set_baud
< 1000000)
873 wr_regl(port
, ureg
->sirfsoc_tx_fifo_ctrl
,
874 SIRFUART_FIFO_THD(port
) / threshold_div
);
875 wr_regl(port
, ureg
->sirfsoc_rx_fifo_ctrl
,
876 SIRFUART_FIFO_THD(port
) / threshold_div
);
877 txfifo_op_reg
|= SIRFUART_FIFO_START
;
878 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, txfifo_op_reg
);
879 uart_update_timeout(port
, termios
->c_cflag
, set_baud
);
880 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, SIRFUART_TX_EN
| SIRFUART_RX_EN
);
881 spin_unlock_irqrestore(&port
->lock
, flags
);
884 static void sirfsoc_uart_pm(struct uart_port
*port
, unsigned int state
,
885 unsigned int oldstate
)
887 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
889 clk_prepare_enable(sirfport
->clk
);
891 clk_disable_unprepare(sirfport
->clk
);
894 static int sirfsoc_uart_startup(struct uart_port
*port
)
896 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
897 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
898 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
899 unsigned int index
= port
->line
;
901 irq_modify_status(port
->irq
, IRQ_NOREQUEST
, IRQ_NOAUTOEN
);
902 ret
= request_irq(port
->irq
,
908 dev_err(port
->dev
, "UART%d request IRQ line (%d) failed.\n",
912 /* initial hardware settings */
913 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
914 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
) |
916 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
917 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) |
919 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
920 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) &
921 ~SIRFUART_RX_DMA_FLUSH
);
922 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_len
, 0);
923 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_len
, 0);
924 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, SIRFUART_RX_EN
| SIRFUART_TX_EN
);
925 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
)
926 wr_regl(port
, ureg
->sirfsoc_mode1
,
927 SIRFSOC_USP_ENDIAN_CTRL_LSBF
|
929 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_RESET
);
930 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
931 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
932 wr_regl(port
, ureg
->sirfsoc_tx_fifo_ctrl
, SIRFUART_FIFO_THD(port
));
933 wr_regl(port
, ureg
->sirfsoc_rx_fifo_ctrl
, SIRFUART_FIFO_THD(port
));
934 if (sirfport
->rx_dma_chan
)
935 wr_regl(port
, ureg
->sirfsoc_rx_fifo_level_chk
,
936 SIRFUART_RX_FIFO_CHK_SC(port
->line
, 0x1) |
937 SIRFUART_RX_FIFO_CHK_LC(port
->line
, 0x2) |
938 SIRFUART_RX_FIFO_CHK_HC(port
->line
, 0x4));
939 if (sirfport
->tx_dma_chan
) {
940 sirfport
->tx_dma_state
= TX_DMA_IDLE
;
941 wr_regl(port
, ureg
->sirfsoc_tx_fifo_level_chk
,
942 SIRFUART_TX_FIFO_CHK_SC(port
->line
, 0x1b) |
943 SIRFUART_TX_FIFO_CHK_LC(port
->line
, 0xe) |
944 SIRFUART_TX_FIFO_CHK_HC(port
->line
, 0x4));
946 sirfport
->ms_enabled
= false;
947 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
&&
948 sirfport
->hw_flow_ctrl
) {
949 irq_modify_status(gpio_to_irq(sirfport
->cts_gpio
),
950 IRQ_NOREQUEST
, IRQ_NOAUTOEN
);
951 ret
= request_irq(gpio_to_irq(sirfport
->cts_gpio
),
952 sirfsoc_uart_usp_cts_handler
, IRQF_TRIGGER_FALLING
|
953 IRQF_TRIGGER_RISING
, "usp_cts_irq", sirfport
);
955 dev_err(port
->dev
, "UART-USP:request gpio irq fail\n");
959 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
&&
960 sirfport
->rx_dma_chan
)
961 wr_regl(port
, ureg
->sirfsoc_swh_dma_io
,
962 SIRFUART_CLEAR_RX_ADDR_EN
);
963 if (sirfport
->uart_reg
->uart_type
== SIRF_USP_UART
&&
964 sirfport
->rx_dma_chan
)
965 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
966 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) |
967 SIRFSOC_USP_FRADDR_CLR_EN
);
968 if (sirfport
->rx_dma_chan
&& !sirfport
->is_hrt_enabled
) {
969 sirfport
->is_hrt_enabled
= true;
970 sirfport
->rx_period_time
= 20000000;
971 sirfport
->rx_last_pos
= -1;
972 sirfport
->pio_fetch_cnt
= 0;
973 sirfport
->rx_dma_items
.xmit
.tail
=
974 sirfport
->rx_dma_items
.xmit
.head
= 0;
975 hrtimer_start(&sirfport
->hrt
,
976 ns_to_ktime(sirfport
->rx_period_time
),
979 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_START
);
980 if (sirfport
->rx_dma_chan
)
981 sirfsoc_uart_start_next_rx_dma(port
);
983 if (!sirfport
->is_atlas7
)
984 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
985 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
986 SIRFUART_RX_IO_INT_EN(uint_en
,
987 sirfport
->uart_reg
->uart_type
));
989 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
990 SIRFUART_RX_IO_INT_EN(uint_en
,
991 sirfport
->uart_reg
->uart_type
));
993 enable_irq(port
->irq
);
997 free_irq(port
->irq
, sirfport
);
/*
 * Shut the port down: mask all UART interrupts, release the port IRQ,
 * stop modem-status handling, and wind down the RX DMA poll timer.
 * Counterpart of sirfsoc_uart_startup().
 */
static void sirfsoc_uart_shutdown(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct circ_buf *xmit;

	xmit = &sirfport->rx_dma_items.xmit;
	/* atlas7 masks interrupts through a dedicated clear register */
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
	else
		wr_regl(port, ureg->sirfsoc_int_en_clr_reg, ~0UL);

	free_irq(port->irq, sirfport);
	if (sirfport->ms_enabled)
		sirfsoc_uart_disable_ms(port);
	/* USP with GPIO flow control: deassert RTS and drop the CTS irq */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
			sirfport->hw_flow_ctrl) {
		gpio_set_value(sirfport->rts_gpio, 1);
		free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
	}
	if (sirfport->tx_dma_chan)
		sirfport->tx_dma_state = TX_DMA_IDLE;
	if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) {
		/*
		 * Busy-wait until the hrtimer callback has drained what is
		 * still in the RX FIFO (beyond the bytes already fetched in
		 * PIO mode) into the DMA ring, then cancel the poll timer.
		 */
		while (((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
			SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt) &&
			!CIRC_CNT(xmit->head, xmit->tail,
			SIRFSOC_RX_DMA_BUF_SIZE))
			;
		sirfport->is_hrt_enabled = false;
		hrtimer_cancel(&sirfport->hrt);
	}
}
1035 static const char *sirfsoc_uart_type(struct uart_port
*port
)
1037 return port
->type
== SIRFSOC_PORT_TYPE
? SIRFUART_PORT_NAME
: NULL
;
1040 static int sirfsoc_uart_request_port(struct uart_port
*port
)
1042 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1043 struct sirfsoc_uart_param
*uart_param
= &sirfport
->uart_reg
->uart_param
;
1045 ret
= request_mem_region(port
->mapbase
,
1046 SIRFUART_MAP_SIZE
, uart_param
->port_name
);
1047 return ret
? 0 : -EBUSY
;
1050 static void sirfsoc_uart_release_port(struct uart_port
*port
)
1052 release_mem_region(port
->mapbase
, SIRFUART_MAP_SIZE
);
1055 static void sirfsoc_uart_config_port(struct uart_port
*port
, int flags
)
1057 if (flags
& UART_CONFIG_TYPE
) {
1058 port
->type
= SIRFSOC_PORT_TYPE
;
1059 sirfsoc_uart_request_port(port
);
/* serial-core callbacks wiring uart_port operations to this driver. */
static const struct uart_ops sirfsoc_uart_ops = {
	.tx_empty	= sirfsoc_uart_tx_empty,
	.get_mctrl	= sirfsoc_uart_get_mctrl,
	.set_mctrl	= sirfsoc_uart_set_mctrl,
	.stop_tx	= sirfsoc_uart_stop_tx,
	.start_tx	= sirfsoc_uart_start_tx,
	.stop_rx	= sirfsoc_uart_stop_rx,
	.enable_ms	= sirfsoc_uart_enable_ms,
	.break_ctl	= sirfsoc_uart_break_ctl,
	.startup	= sirfsoc_uart_startup,
	.shutdown	= sirfsoc_uart_shutdown,
	.set_termios	= sirfsoc_uart_set_termios,
	.pm		= sirfsoc_uart_pm,
	.type		= sirfsoc_uart_type,
	.release_port	= sirfsoc_uart_release_port,
	.request_port	= sirfsoc_uart_request_port,
	.config_port	= sirfsoc_uart_config_port,
};
1082 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
/*
 * Console setup: resolve the port for co->index, parse any
 * "baud,parity,bits,flow" option string (defaults 115200n8), and force
 * PIO transfers for console output by disabling the DMA channels.
 * Returns -ENODEV when the selected port has not been probed yet.
 */
static int __init
sirfsoc_uart_console_setup(struct console *co, char *options)
{
	unsigned int baud = 115200;
	unsigned int bits = 8;
	unsigned int parity = 'n';
	unsigned int flow = 'n';
	struct sirfsoc_uart_port *sirfport;
	struct sirfsoc_register *ureg;

	if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
		co->index = 1;
	sirfport = sirf_ports[co->index];
	if (!sirfport)
		return -ENODEV;
	ureg = &sirfport->uart_reg->uart_reg;
	/* not probed/mapped yet: cannot touch registers */
	if (!sirfport->port.mapbase)
		return -ENODEV;

	/* enable usp in mode1 register */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
		wr_regl(&sirfport->port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
				SIRFSOC_USP_ENDIAN_CTRL_LSBF);
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	sirfport->port.cons = co;

	/* default console tx/rx transfer using io mode */
	sirfport->rx_dma_chan = NULL;
	sirfport->tx_dma_chan = NULL;
	return uart_set_options(&sirfport->port, co, baud, parity, bits, flow);
}
1115 static void sirfsoc_uart_console_putchar(struct uart_port
*port
, int ch
)
1117 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1118 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
1119 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
1120 while (rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
1121 ufifo_st
->ff_full(port
))
1123 wr_regl(port
, ureg
->sirfsoc_tx_fifo_data
, ch
);
1126 static void sirfsoc_uart_console_write(struct console
*co
, const char *s
,
1129 struct sirfsoc_uart_port
*sirfport
= sirf_ports
[co
->index
];
1131 uart_console_write(&sirfport
->port
, s
, count
,
1132 sirfsoc_uart_console_putchar
);
/*
 * Console descriptor bound to the SiRF uart driver.
 * index -1 lets the console core pick the port from the command line.
 */
static struct console sirfsoc_uart_console = {
	.name		= SIRFSOC_UART_NAME,
	.device		= uart_console_device,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.write		= sirfsoc_uart_console_write,
	.setup		= sirfsoc_uart_console_setup,
	.data           = &sirfsoc_uart_drv,
};
1145 static int __init
sirfsoc_uart_console_init(void)
1147 register_console(&sirfsoc_uart_console
);
1150 console_initcall(sirfsoc_uart_console_init
);
/* uart_driver object shared by all SiRF ports registered by this module. */
static struct uart_driver sirfsoc_uart_drv = {
	.owner		= THIS_MODULE,
	.driver_name	= SIRFUART_PORT_NAME,
	.nr		= SIRFSOC_UART_NR,
	.dev_name	= SIRFSOC_UART_NAME,
	.major		= SIRFSOC_UART_MAJOR,
	.minor		= SIRFSOC_UART_MINOR,
#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
	.cons		= &sirfsoc_uart_console,
#endif
};
/*
 * Periodic RX poll used in DMA mode: push bytes the DMA engine has
 * written into the circular buffer up to the tty layer. When the ring
 * has been fully drained but a few residual bytes (less than one DMA
 * transfer unit) remain in the RX FIFO, fetch them in PIO mode and
 * switch straight back to DMA. The timer always re-arms itself.
 */
static enum hrtimer_restart
	sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer *hrt)
{
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	int count, inserted;
	struct dma_tx_state tx_state;
	struct tty_struct *tty;
	struct sirfsoc_register *ureg;
	struct circ_buf *xmit;
	struct sirfsoc_fifo_status *ufifo_st;
	int max_pio_cnt;

	sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
	port = &sirfport->port;
	inserted = 0;
	tty = port->state->port.tty;
	ureg = &sirfport->uart_reg->uart_reg;
	xmit = &sirfport->rx_dma_items.xmit;
	ufifo_st = &sirfport->uart_reg->fifo_status;

	/* advance the ring head to wherever DMA has written so far */
	dmaengine_tx_status(sirfport->rx_dma_chan,
			sirfport->rx_dma_items.cookie, &tx_state);
	if (SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue !=
		sirfport->rx_last_pos) {
		xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
		sirfport->rx_last_pos = xmit->head;
		sirfport->pio_fetch_cnt = 0;
	}
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
			SIRFSOC_RX_DMA_BUF_SIZE);
	/* hand contiguous runs of the ring to the tty flip buffer */
	while (count > 0) {
		inserted = tty_insert_flip_string(tty->port,
			(const unsigned char *)&xmit->buf[xmit->tail], count);
		if (!inserted)
			goto next_hrt;
		port->icount.rx += inserted;
		xmit->tail = (xmit->tail + inserted) &
				(SIRFSOC_RX_DMA_BUF_SIZE - 1);
		count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
				SIRFSOC_RX_DMA_BUF_SIZE);
		tty_flip_buffer_push(tty->port);
	}
	/*
	 * if RX DMA buffer data have all push into tty buffer, and there is
	 * only little data(less than a dma transfer unit) left in rxfifo,
	 * fetch it out in pio mode and switch back to dma immediately
	 */
	if (!inserted && !count &&
		((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
		SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt)) {
		dmaengine_pause(sirfport->rx_dma_chan);
		/* switch to pio mode */
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
			SIRFUART_IO_MODE);
		/*
		 * UART controller SWH_DMA_IO register have CLEAR_RX_ADDR_EN
		 * When found changing I/O to DMA mode, it clears
		 * two low bits of read point;
		 * USP have similar FRADDR_CLR_EN bit in USP_RX_DMA_IO_CTRL.
		 * Fetch data out from rxfifo into DMA buffer in PIO mode,
		 * while switch back to DMA mode, the data fetched will override
		 * by DMA, as hardware have a strange behaviour:
		 * after switch back to DMA mode, check rxfifo status it will
		 * be the number PIO fetched, so record the fetched data count
		 * to avoid the repeated fetch
		 */
		max_pio_cnt = 3;
		while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
			ufifo_st->ff_empty(port)) && max_pio_cnt--) {
			xmit->buf[xmit->head] =
				rd_regl(port, ureg->sirfsoc_rx_fifo_data);
			xmit->head = (xmit->head + 1) &
					(SIRFSOC_RX_DMA_BUF_SIZE - 1);
			sirfport->pio_fetch_cnt++;
		}
		/* switch back to dma mode */
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
			~SIRFUART_IO_MODE);
		dmaengine_resume(sirfport->rx_dma_chan);
	}
next_hrt:
	hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
	return HRTIMER_RESTART;
}
/*
 * DT match table: prima2/atlas7 dedicated UARTs use the sirfsoc_uart
 * register layout, the USP-based variants use sirfsoc_usp.
 */
static const struct of_device_id sirfsoc_uart_ids[] = {
	{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
	{ .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
	{ .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
	{ .compatible = "sirf,atlas7-usp-uart", .data = &sirfsoc_usp},
	{}
};
MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
1264 static int sirfsoc_uart_probe(struct platform_device
*pdev
)
1266 struct device_node
*np
= pdev
->dev
.of_node
;
1267 struct sirfsoc_uart_port
*sirfport
;
1268 struct uart_port
*port
;
1269 struct resource
*res
;
1271 struct dma_slave_config slv_cfg
= {
1274 struct dma_slave_config tx_slv_cfg
= {
1277 const struct of_device_id
*match
;
1279 match
= of_match_node(sirfsoc_uart_ids
, np
);
1280 sirfport
= devm_kzalloc(&pdev
->dev
, sizeof(*sirfport
), GFP_KERNEL
);
1285 sirfport
->port
.line
= of_alias_get_id(np
, "serial");
1286 sirf_ports
[sirfport
->port
.line
] = sirfport
;
1287 sirfport
->port
.iotype
= UPIO_MEM
;
1288 sirfport
->port
.flags
= UPF_BOOT_AUTOCONF
;
1289 port
= &sirfport
->port
;
1290 port
->dev
= &pdev
->dev
;
1291 port
->private_data
= sirfport
;
1292 sirfport
->uart_reg
= (struct sirfsoc_uart_register
*)match
->data
;
1294 sirfport
->hw_flow_ctrl
=
1295 of_property_read_bool(np
, "uart-has-rtscts") ||
1296 of_property_read_bool(np
, "sirf,uart-has-rtscts") /* deprecated */;
1297 if (of_device_is_compatible(np
, "sirf,prima2-uart") ||
1298 of_device_is_compatible(np
, "sirf,atlas7-uart"))
1299 sirfport
->uart_reg
->uart_type
= SIRF_REAL_UART
;
1300 if (of_device_is_compatible(np
, "sirf,prima2-usp-uart") ||
1301 of_device_is_compatible(np
, "sirf,atlas7-usp-uart")) {
1302 sirfport
->uart_reg
->uart_type
= SIRF_USP_UART
;
1303 if (!sirfport
->hw_flow_ctrl
)
1304 goto usp_no_flow_control
;
1305 if (of_find_property(np
, "cts-gpios", NULL
))
1306 sirfport
->cts_gpio
=
1307 of_get_named_gpio(np
, "cts-gpios", 0);
1309 sirfport
->cts_gpio
= -1;
1310 if (of_find_property(np
, "rts-gpios", NULL
))
1311 sirfport
->rts_gpio
=
1312 of_get_named_gpio(np
, "rts-gpios", 0);
1314 sirfport
->rts_gpio
= -1;
1316 if ((!gpio_is_valid(sirfport
->cts_gpio
) ||
1317 !gpio_is_valid(sirfport
->rts_gpio
))) {
1320 "Usp flow control must have cts and rts gpio");
1323 ret
= devm_gpio_request(&pdev
->dev
, sirfport
->cts_gpio
,
1326 dev_err(&pdev
->dev
, "Unable request cts gpio");
1329 gpio_direction_input(sirfport
->cts_gpio
);
1330 ret
= devm_gpio_request(&pdev
->dev
, sirfport
->rts_gpio
,
1333 dev_err(&pdev
->dev
, "Unable request rts gpio");
1336 gpio_direction_output(sirfport
->rts_gpio
, 1);
1338 usp_no_flow_control
:
1339 if (of_device_is_compatible(np
, "sirf,atlas7-uart") ||
1340 of_device_is_compatible(np
, "sirf,atlas7-usp-uart"))
1341 sirfport
->is_atlas7
= true;
1343 if (of_property_read_u32(np
, "fifosize", &port
->fifosize
)) {
1345 "Unable to find fifosize in uart node.\n");
1350 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1352 dev_err(&pdev
->dev
, "Insufficient resources.\n");
1356 port
->mapbase
= res
->start
;
1357 port
->membase
= devm_ioremap(&pdev
->dev
,
1358 res
->start
, resource_size(res
));
1359 if (!port
->membase
) {
1360 dev_err(&pdev
->dev
, "Cannot remap resource.\n");
1364 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
1366 dev_err(&pdev
->dev
, "Insufficient resources.\n");
1370 port
->irq
= res
->start
;
1372 sirfport
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
1373 if (IS_ERR(sirfport
->clk
)) {
1374 ret
= PTR_ERR(sirfport
->clk
);
1377 port
->uartclk
= clk_get_rate(sirfport
->clk
);
1379 port
->ops
= &sirfsoc_uart_ops
;
1380 spin_lock_init(&port
->lock
);
1382 platform_set_drvdata(pdev
, sirfport
);
1383 ret
= uart_add_one_port(&sirfsoc_uart_drv
, port
);
1385 dev_err(&pdev
->dev
, "Cannot add UART port(%d).\n", pdev
->id
);
1389 sirfport
->rx_dma_chan
= dma_request_slave_channel(port
->dev
, "rx");
1390 sirfport
->rx_dma_items
.xmit
.buf
=
1391 dma_alloc_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1392 &sirfport
->rx_dma_items
.dma_addr
, GFP_KERNEL
);
1393 if (!sirfport
->rx_dma_items
.xmit
.buf
) {
1394 dev_err(port
->dev
, "Uart alloc bufa failed\n");
1396 goto alloc_coherent_err
;
1398 sirfport
->rx_dma_items
.xmit
.head
=
1399 sirfport
->rx_dma_items
.xmit
.tail
= 0;
1400 if (sirfport
->rx_dma_chan
)
1401 dmaengine_slave_config(sirfport
->rx_dma_chan
, &slv_cfg
);
1402 sirfport
->tx_dma_chan
= dma_request_slave_channel(port
->dev
, "tx");
1403 if (sirfport
->tx_dma_chan
)
1404 dmaengine_slave_config(sirfport
->tx_dma_chan
, &tx_slv_cfg
);
1405 if (sirfport
->rx_dma_chan
) {
1406 hrtimer_init(&sirfport
->hrt
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
1407 sirfport
->hrt
.function
= sirfsoc_uart_rx_dma_hrtimer_callback
;
1408 sirfport
->is_hrt_enabled
= false;
1413 dma_free_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1414 sirfport
->rx_dma_items
.xmit
.buf
,
1415 sirfport
->rx_dma_items
.dma_addr
);
1416 dma_release_channel(sirfport
->rx_dma_chan
);
1421 static int sirfsoc_uart_remove(struct platform_device
*pdev
)
1423 struct sirfsoc_uart_port
*sirfport
= platform_get_drvdata(pdev
);
1424 struct uart_port
*port
= &sirfport
->port
;
1425 uart_remove_one_port(&sirfsoc_uart_drv
, port
);
1426 if (sirfport
->rx_dma_chan
) {
1427 dmaengine_terminate_all(sirfport
->rx_dma_chan
);
1428 dma_release_channel(sirfport
->rx_dma_chan
);
1429 dma_free_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1430 sirfport
->rx_dma_items
.xmit
.buf
,
1431 sirfport
->rx_dma_items
.dma_addr
);
1433 if (sirfport
->tx_dma_chan
) {
1434 dmaengine_terminate_all(sirfport
->tx_dma_chan
);
1435 dma_release_channel(sirfport
->tx_dma_chan
);
1440 #ifdef CONFIG_PM_SLEEP
1442 sirfsoc_uart_suspend(struct device
*pdev
)
1444 struct sirfsoc_uart_port
*sirfport
= dev_get_drvdata(pdev
);
1445 struct uart_port
*port
= &sirfport
->port
;
1446 uart_suspend_port(&sirfsoc_uart_drv
, port
);
1450 static int sirfsoc_uart_resume(struct device
*pdev
)
1452 struct sirfsoc_uart_port
*sirfport
= dev_get_drvdata(pdev
);
1453 struct uart_port
*port
= &sirfport
->port
;
1454 uart_resume_port(&sirfsoc_uart_drv
, port
);
/* Only system-sleep PM is implemented; no runtime PM callbacks. */
static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
};
/* Platform driver glue: probe/remove plus DT matching and PM ops. */
static struct platform_driver sirfsoc_uart_driver = {
	.probe		= sirfsoc_uart_probe,
	.remove		= sirfsoc_uart_remove,
	.driver		= {
		.name	= SIRFUART_PORT_NAME,
		.of_match_table	= sirfsoc_uart_ids,
		.pm	= &sirfsoc_uart_pm_ops,
	},
};
1473 static int __init
sirfsoc_uart_init(void)
1477 ret
= uart_register_driver(&sirfsoc_uart_drv
);
1481 ret
= platform_driver_register(&sirfsoc_uart_driver
);
1483 uart_unregister_driver(&sirfsoc_uart_drv
);
1487 module_init(sirfsoc_uart_init
);
1489 static void __exit
sirfsoc_uart_exit(void)
1491 platform_driver_unregister(&sirfsoc_uart_driver
);
1492 uart_unregister_driver(&sirfsoc_uart_drv
);
1494 module_exit(sirfsoc_uart_exit
);
1496 MODULE_LICENSE("GPL v2");
1497 MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
1498 MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");