/*
 * Driver for CSR SiRFprimaII onboard UARTs.
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
9 #include <linux/module.h>
10 #include <linux/ioport.h>
11 #include <linux/platform_device.h>
12 #include <linux/init.h>
13 #include <linux/sysrq.h>
14 #include <linux/console.h>
15 #include <linux/tty.h>
16 #include <linux/tty_flip.h>
17 #include <linux/serial_core.h>
18 #include <linux/serial.h>
19 #include <linux/clk.h>
21 #include <linux/slab.h>
23 #include <linux/of_gpio.h>
24 #include <linux/dmaengine.h>
25 #include <linux/dma-direction.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/sirfsoc_dma.h>
29 #include <asm/mach/irq.h>
31 #include "sirfsoc_uart.h"
34 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port
*sirfport
, int count
);
36 sirfsoc_uart_pio_rx_chars(struct uart_port
*port
, unsigned int max_rx_count
);
37 static struct uart_driver sirfsoc_uart_drv
;
39 static void sirfsoc_uart_tx_dma_complete_callback(void *param
);
40 static void sirfsoc_uart_start_next_rx_dma(struct uart_port
*port
);
41 static void sirfsoc_uart_rx_dma_complete_callback(void *param
);
42 static const struct sirfsoc_baudrate_to_regv baudrate_to_regv
[] = {
63 static struct sirfsoc_uart_port sirfsoc_uart_ports
[SIRFSOC_UART_NR
] = {
67 .flags
= UPF_BOOT_AUTOCONF
,
74 .flags
= UPF_BOOT_AUTOCONF
,
81 .flags
= UPF_BOOT_AUTOCONF
,
88 .flags
= UPF_BOOT_AUTOCONF
,
95 .flags
= UPF_BOOT_AUTOCONF
,
102 .flags
= UPF_BOOT_AUTOCONF
,
/*
 * to_sirfport() - map a serial-core uart_port back to its containing
 * driver-private sirfsoc_uart_port via container_of() on the embedded
 * 'port' member.
 */
108 static inline struct sirfsoc_uart_port
*to_sirfport(struct uart_port
*port
)
110 return container_of(port
, struct sirfsoc_uart_port
, port
);
/*
 * sirfsoc_uart_tx_empty() - uart_ops tx_empty hook.
 * Reads the TX FIFO status register and returns TIOCSER_TEMT when the
 * per-line fifo-empty bit is set, 0 otherwise.
 * NOTE(review): the declaration of 'reg' was dropped by the extraction
 * (original line missing) — verify against the full source.
 */
113 static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port
*port
)
116 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
117 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
118 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
/* Snapshot of the hardware TX FIFO status for this port. */
119 reg
= rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
);
/* ff_empty(line) yields the empty-flag mask for this uart line. */
121 return (reg
& ufifo_st
->ff_empty(port
->line
)) ? TIOCSER_TEMT
: 0;
/*
 * sirfsoc_uart_get_mctrl() - uart_ops get_mctrl hook.
 * Always reports carrier and DSR asserted (the hardware has no such
 * inputs wired here).  CTS is only meaningful when hardware flow control
 * and modem-status reporting are enabled: a real UART samples the CTS
 * status bit in the AFC control register, while the USP variant samples
 * the cts_gpio pin (active low, per the '!' on gpio_get_value).
 * NOTE(review): some branch/return lines were dropped by the extraction;
 * confirm which paths return CAR|DSR vs CAR|DSR|CTS against full source.
 */
124 static unsigned int sirfsoc_uart_get_mctrl(struct uart_port
*port
)
126 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
127 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
/* Without HW flow control or modem-status enabled, CTS can't be read. */
128 if (!sirfport
->hw_flow_ctrl
|| !sirfport
->ms_enabled
)
130 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
/* Real UART: CTS comes from the AFC control register status bit. */
131 if (!(rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) &
132 SIRFUART_AFC_CTS_STATUS
))
/* USP: CTS comes from a GPIO, asserted when the pin reads low. */
137 if (!gpio_get_value(sirfport
->cts_gpio
))
143 return TIOCM_CAR
| TIOCM_DSR
;
145 return TIOCM_CAR
| TIOCM_DSR
| TIOCM_CTS
;
/*
 * sirfsoc_uart_set_mctrl() - uart_ops set_mctrl hook.
 * Only acts when hardware flow control and modem-status reporting are
 * both enabled.  For a real UART, asserting TIOCM_RTS programs the RX
 * threshold value into the AFC control register (deasserting writes 0);
 * for the USP variant, RTS is driven directly on rts_gpio.
 * NOTE(review): the extraction dropped some lines here (including the
 * write that combines 'current_val' with 'val') — verify against the
 * full source before relying on the exact register sequence.
 */
148 static void sirfsoc_uart_set_mctrl(struct uart_port
*port
, unsigned int mctrl
)
150 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
151 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
/* Whether the caller wants RTS asserted. */
152 unsigned int assert = mctrl
& TIOCM_RTS
;
/* RTS asserted -> program RX threshold; deasserted -> 0. */
153 unsigned int val
= assert ? SIRFUART_AFC_CTRL_RX_THD
: 0x0;
154 unsigned int current_val
;
156 if (!sirfport
->hw_flow_ctrl
|| !sirfport
->ms_enabled
)
158 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
/* Preserve the non-threshold bits of the AFC control register. */
159 current_val
= rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) & ~0xFF;
161 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
, val
);
/* USP variant: drive RTS directly on the GPIO. */
164 gpio_set_value(sirfport
->rts_gpio
, 1);
166 gpio_set_value(sirfport
->rts_gpio
, 0);
170 static void sirfsoc_uart_stop_tx(struct uart_port
*port
)
172 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
173 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
174 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
176 if (IS_DMA_CHAN_VALID(sirfport
->tx_dma_no
)) {
177 if (sirfport
->tx_dma_state
== TX_DMA_RUNNING
) {
178 dmaengine_pause(sirfport
->tx_dma_chan
);
179 sirfport
->tx_dma_state
= TX_DMA_PAUSE
;
181 if (!sirfport
->is_marco
)
182 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
183 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
184 ~uint_en
->sirfsoc_txfifo_empty_en
);
186 wr_regl(port
, SIRFUART_INT_EN_CLR
,
187 uint_en
->sirfsoc_txfifo_empty_en
);
190 if (!sirfport
->is_marco
)
191 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
192 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
193 ~uint_en
->sirfsoc_txfifo_empty_en
);
195 wr_regl(port
, SIRFUART_INT_EN_CLR
,
196 uint_en
->sirfsoc_txfifo_empty_en
);
200 static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port
*sirfport
)
202 struct uart_port
*port
= &sirfport
->port
;
203 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
204 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
205 struct circ_buf
*xmit
= &port
->state
->xmit
;
206 unsigned long tran_size
;
207 unsigned long tran_start
;
208 unsigned long pio_tx_size
;
210 tran_size
= CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
, UART_XMIT_SIZE
);
211 tran_start
= (unsigned long)(xmit
->buf
+ xmit
->tail
);
212 if (uart_circ_empty(xmit
) || uart_tx_stopped(port
) ||
215 if (sirfport
->tx_dma_state
== TX_DMA_PAUSE
) {
216 dmaengine_resume(sirfport
->tx_dma_chan
);
219 if (sirfport
->tx_dma_state
== TX_DMA_RUNNING
)
221 if (!sirfport
->is_marco
)
222 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
223 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
224 ~(uint_en
->sirfsoc_txfifo_empty_en
));
226 wr_regl(port
, SIRFUART_INT_EN_CLR
,
227 uint_en
->sirfsoc_txfifo_empty_en
);
229 * DMA requires buffer address and buffer length are both aligned with
230 * 4 bytes, so we use PIO for
231 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
232 * bytes, and move to DMA for the left part aligned with 4bytes
233 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
234 * part first, move to PIO for the left 1~3 bytes
236 if (tran_size
< 4 || BYTES_TO_ALIGN(tran_start
)) {
237 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
238 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
239 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
)|
241 if (BYTES_TO_ALIGN(tran_start
)) {
242 pio_tx_size
= sirfsoc_uart_pio_tx_chars(sirfport
,
243 BYTES_TO_ALIGN(tran_start
));
244 tran_size
-= pio_tx_size
;
247 sirfsoc_uart_pio_tx_chars(sirfport
, tran_size
);
248 if (!sirfport
->is_marco
)
249 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
250 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)|
251 uint_en
->sirfsoc_txfifo_empty_en
);
253 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
254 uint_en
->sirfsoc_txfifo_empty_en
);
255 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
257 /* tx transfer mode switch into dma mode */
258 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_STOP
);
259 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
,
260 rd_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
)&
262 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
265 sirfport
->tx_dma_addr
= dma_map_single(port
->dev
,
266 xmit
->buf
+ xmit
->tail
,
267 tran_size
, DMA_TO_DEVICE
);
268 sirfport
->tx_dma_desc
= dmaengine_prep_slave_single(
269 sirfport
->tx_dma_chan
, sirfport
->tx_dma_addr
,
270 tran_size
, DMA_MEM_TO_DEV
, DMA_PREP_INTERRUPT
);
271 if (!sirfport
->tx_dma_desc
) {
272 dev_err(port
->dev
, "DMA prep slave single fail\n");
275 sirfport
->tx_dma_desc
->callback
=
276 sirfsoc_uart_tx_dma_complete_callback
;
277 sirfport
->tx_dma_desc
->callback_param
= (void *)sirfport
;
278 sirfport
->transfer_size
= tran_size
;
280 dmaengine_submit(sirfport
->tx_dma_desc
);
281 dma_async_issue_pending(sirfport
->tx_dma_chan
);
282 sirfport
->tx_dma_state
= TX_DMA_RUNNING
;
286 static void sirfsoc_uart_start_tx(struct uart_port
*port
)
288 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
289 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
290 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
291 if (IS_DMA_CHAN_VALID(sirfport
->tx_dma_no
))
292 sirfsoc_uart_tx_with_dma(sirfport
);
294 sirfsoc_uart_pio_tx_chars(sirfport
, 1);
295 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, SIRFUART_FIFO_START
);
296 if (!sirfport
->is_marco
)
297 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
298 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)|
299 uint_en
->sirfsoc_txfifo_empty_en
);
301 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
302 uint_en
->sirfsoc_txfifo_empty_en
);
306 static void sirfsoc_uart_stop_rx(struct uart_port
*port
)
308 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
309 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
310 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
312 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
313 if (IS_DMA_CHAN_VALID(sirfport
->rx_dma_no
)) {
314 if (!sirfport
->is_marco
)
315 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
316 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
317 ~(SIRFUART_RX_DMA_INT_EN(port
, uint_en
) |
318 uint_en
->sirfsoc_rx_done_en
));
320 wr_regl(port
, SIRFUART_INT_EN_CLR
,
321 SIRFUART_RX_DMA_INT_EN(port
, uint_en
)|
322 uint_en
->sirfsoc_rx_done_en
);
323 dmaengine_terminate_all(sirfport
->rx_dma_chan
);
325 if (!sirfport
->is_marco
)
326 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
327 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
328 ~(SIRFUART_RX_IO_INT_EN(port
, uint_en
)));
330 wr_regl(port
, SIRFUART_INT_EN_CLR
,
331 SIRFUART_RX_IO_INT_EN(port
, uint_en
));
335 static void sirfsoc_uart_disable_ms(struct uart_port
*port
)
337 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
338 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
339 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
341 if (!sirfport
->hw_flow_ctrl
)
343 sirfport
->ms_enabled
= false;
344 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
345 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
,
346 rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) & ~0x3FF);
347 if (!sirfport
->is_marco
)
348 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
349 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)&
350 ~uint_en
->sirfsoc_cts_en
);
352 wr_regl(port
, SIRFUART_INT_EN_CLR
,
353 uint_en
->sirfsoc_cts_en
);
355 disable_irq(gpio_to_irq(sirfport
->cts_gpio
));
/*
 * sirfsoc_uart_usp_cts_handler() - GPIO interrupt handler for the CTS
 * line on the USP-based UART.  When the CTS gpio is valid and modem
 * status reporting is enabled, forwards the (inverted, active-low) pin
 * state to the serial core via uart_handle_cts_change().
 * dev_id is the sirfsoc_uart_port registered with request_irq().
 */
358 static irqreturn_t
sirfsoc_uart_usp_cts_handler(int irq
, void *dev_id
)
360 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)dev_id
;
361 struct uart_port
*port
= &sirfport
->port
;
362 if (gpio_is_valid(sirfport
->cts_gpio
) && sirfport
->ms_enabled
)
363 uart_handle_cts_change(port
,
364 !gpio_get_value(sirfport
->cts_gpio
));
368 static void sirfsoc_uart_enable_ms(struct uart_port
*port
)
370 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
371 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
372 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
374 if (!sirfport
->hw_flow_ctrl
)
376 sirfport
->ms_enabled
= true;
377 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
378 wr_regl(port
, ureg
->sirfsoc_afc_ctrl
,
379 rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) |
380 SIRFUART_AFC_TX_EN
| SIRFUART_AFC_RX_EN
);
381 if (!sirfport
->is_marco
)
382 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
383 rd_regl(port
, ureg
->sirfsoc_int_en_reg
)
384 | uint_en
->sirfsoc_cts_en
);
386 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
387 uint_en
->sirfsoc_cts_en
);
389 enable_irq(gpio_to_irq(sirfport
->cts_gpio
));
/*
 * sirfsoc_uart_break_ctl() - uart_ops break_ctl hook.
 * Only the real UART supports break: read-modify-write the line control
 * register, setting SIRFUART_SET_BREAK to start a break and clearing it
 * to stop (the break_state test line was dropped by the extraction).
 * The USP variant has no break support, so nothing is done for it.
 */
392 static void sirfsoc_uart_break_ctl(struct uart_port
*port
, int break_state
)
394 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
395 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
396 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
397 unsigned long ulcon
= rd_regl(port
, ureg
->sirfsoc_line_ctrl
);
399 ulcon
|= SIRFUART_SET_BREAK
;
401 ulcon
&= ~SIRFUART_SET_BREAK
;
402 wr_regl(port
, ureg
->sirfsoc_line_ctrl
, ulcon
);
/*
 * sirfsoc_uart_pio_rx_chars() - drain the RX FIFO by programmed I/O.
 * Reads data words from the RX FIFO until its empty flag is set or
 * max_rx_count characters have been taken; each character is offered to
 * the sysrq handler first and otherwise inserted into the tty layer as
 * TTY_NORMAL.  Accumulated counts are added to rx_io_count (used by the
 * DMA/PIO mode-switch logic elsewhere in this file) and icount.rx, then
 * the flip buffer is pushed with the port lock dropped around the push.
 * NOTE(review): the return statement and parts of the loop body were
 * dropped by the extraction — presumably rx_count is returned; confirm.
 */
407 sirfsoc_uart_pio_rx_chars(struct uart_port
*port
, unsigned int max_rx_count
)
409 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
410 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
411 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
412 unsigned int ch
, rx_count
= 0;
413 struct tty_struct
*tty
;
414 tty
= tty_port_tty_get(&port
->state
->port
);
/* Loop while the hardware RX FIFO still holds data. */
417 while (!(rd_regl(port
, ureg
->sirfsoc_rx_fifo_status
) &
418 ufifo_st
->ff_empty(port
->line
))) {
419 ch
= rd_regl(port
, ureg
->sirfsoc_rx_fifo_data
) |
/* Give sysrq a first look at each received character. */
421 if (unlikely(uart_handle_sysrq_char(port
, ch
)))
423 uart_insert_char(port
, 0, 0, ch
, TTY_NORMAL
);
/* Stop once the caller's budget is consumed. */
425 if (rx_count
>= max_rx_count
)
429 sirfport
->rx_io_count
+= rx_count
;
430 port
->icount
.rx
+= rx_count
;
/* Push to the line discipline without holding the port lock. */
432 spin_unlock(&port
->lock
);
433 tty_flip_buffer_push(&port
->state
->port
);
434 spin_lock(&port
->lock
);
/*
 * sirfsoc_uart_pio_tx_chars() - feed the TX FIFO by programmed I/O.
 * Pops characters from the circular xmit buffer into the TX FIFO data
 * register while the buffer is non-empty, the FIFO's full flag is clear,
 * and (per the 'count' parameter — comparison line dropped by the
 * extraction) fewer than 'count' characters have been written.  Wakes
 * up writers once the pending count drops below WAKEUP_CHARS.
 * NOTE(review): the return statement is not visible — presumably the
 * number of characters written (num_tx) is returned; confirm.
 */
440 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port
*sirfport
, int count
)
442 struct uart_port
*port
= &sirfport
->port
;
443 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
444 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
445 struct circ_buf
*xmit
= &port
->state
->xmit
;
446 unsigned int num_tx
= 0;
/* Stop on empty xmit buffer or a full hardware FIFO. */
447 while (!uart_circ_empty(xmit
) &&
448 !(rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
449 ufifo_st
->ff_full(port
->line
)) &&
451 wr_regl(port
, ureg
->sirfsoc_tx_fifo_data
,
452 xmit
->buf
[xmit
->tail
]);
/* Advance tail with the usual power-of-two circ-buffer mask. */
453 xmit
->tail
= (xmit
->tail
+ 1) & (UART_XMIT_SIZE
- 1);
457 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
458 uart_write_wakeup(port
);
/*
 * sirfsoc_uart_tx_dma_complete_callback() - dmaengine completion callback
 * for a TX transfer.  Advances the circular xmit tail by the completed
 * transfer_size, updates icount.tx, wakes writers below WAKEUP_CHARS,
 * unmaps the DMA buffer, then (under tx_lock) marks the channel idle and
 * kicks off the next transfer via sirfsoc_uart_tx_with_dma().
 * NOTE(review): the declaration of 'flags' was dropped by the extraction.
 */
462 static void sirfsoc_uart_tx_dma_complete_callback(void *param
)
464 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
465 struct uart_port
*port
= &sirfport
->port
;
466 struct circ_buf
*xmit
= &port
->state
->xmit
;
/* Consume the bytes the DMA engine just sent. */
469 xmit
->tail
= (xmit
->tail
+ sirfport
->transfer_size
) &
470 (UART_XMIT_SIZE
- 1);
471 port
->icount
.tx
+= sirfport
->transfer_size
;
472 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
473 uart_write_wakeup(port
);
/* Release the streaming DMA mapping created when the TX was set up. */
474 if (sirfport
->tx_dma_addr
)
475 dma_unmap_single(port
->dev
, sirfport
->tx_dma_addr
,
476 sirfport
->transfer_size
, DMA_TO_DEVICE
);
/* Mark the channel idle and immediately try to queue more TX data. */
477 spin_lock_irqsave(&sirfport
->tx_lock
, flags
);
478 sirfport
->tx_dma_state
= TX_DMA_IDLE
;
479 sirfsoc_uart_tx_with_dma(sirfport
);
480 spin_unlock_irqrestore(&sirfport
->tx_lock
, flags
);
/*
 * sirfsoc_uart_insert_rx_buf_to_tty() - hand 'count' received bytes from
 * the current RX DMA ring buffer (rx_dma_items[rx_completed]) to the tty
 * flip buffer and push them to the line discipline.  icount.rx is
 * advanced by the number actually inserted (which may be less than
 * 'count' if the flip buffer is short on space).
 * NOTE(review): the declaration of 'inserted' was dropped by the
 * extraction (original line missing).
 */
483 static void sirfsoc_uart_insert_rx_buf_to_tty(
484 struct sirfsoc_uart_port
*sirfport
, int count
)
486 struct uart_port
*port
= &sirfport
->port
;
487 struct tty_port
*tport
= &port
->state
->port
;
490 inserted
= tty_insert_flip_string(tport
,
491 sirfport
->rx_dma_items
[sirfport
->rx_completed
].xmit
.buf
, count
);
492 port
->icount
.rx
+= inserted
;
493 tty_flip_buffer_push(tport
);
/*
 * sirfsoc_rx_submit_one_dma_desc() - (re)arm one slot of the RX DMA ring.
 * Resets the slot's circ-buffer indices, prepares a DEV_TO_MEM slave
 * transfer of SIRFSOC_RX_DMA_BUF_SIZE bytes into the slot's dma_addr,
 * wires up sirfsoc_uart_rx_dma_complete_callback with the port as its
 * parameter, submits the descriptor (stashing the cookie for later
 * dmaengine_tx_status() polling) and issues pending work on the channel.
 * On prep failure only an error is logged (the early-return line was
 * dropped by the extraction).
 */
496 static void sirfsoc_rx_submit_one_dma_desc(struct uart_port
*port
, int index
)
498 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
/* Fresh slot: nothing produced or consumed yet. */
500 sirfport
->rx_dma_items
[index
].xmit
.tail
=
501 sirfport
->rx_dma_items
[index
].xmit
.head
= 0;
502 sirfport
->rx_dma_items
[index
].desc
=
503 dmaengine_prep_slave_single(sirfport
->rx_dma_chan
,
504 sirfport
->rx_dma_items
[index
].dma_addr
, SIRFSOC_RX_DMA_BUF_SIZE
,
505 DMA_DEV_TO_MEM
, DMA_PREP_INTERRUPT
);
506 if (!sirfport
->rx_dma_items
[index
].desc
) {
507 dev_err(port
->dev
, "DMA slave single fail\n");
510 sirfport
->rx_dma_items
[index
].desc
->callback
=
511 sirfsoc_uart_rx_dma_complete_callback
;
512 sirfport
->rx_dma_items
[index
].desc
->callback_param
= sirfport
;
/* Keep the cookie so completion can be polled with dmaengine_tx_status. */
513 sirfport
->rx_dma_items
[index
].cookie
=
514 dmaengine_submit(sirfport
->rx_dma_items
[index
].desc
);
515 dma_async_issue_pending(sirfport
->rx_dma_chan
);
518 static void sirfsoc_rx_tmo_process_tl(unsigned long param
)
520 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
521 struct uart_port
*port
= &sirfport
->port
;
522 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
523 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
524 struct sirfsoc_int_status
*uint_st
= &sirfport
->uart_reg
->uart_int_st
;
527 struct dma_tx_state tx_state
;
529 spin_lock_irqsave(&sirfport
->rx_lock
, flags
);
530 while (DMA_COMPLETE
== dmaengine_tx_status(sirfport
->rx_dma_chan
,
531 sirfport
->rx_dma_items
[sirfport
->rx_completed
].cookie
, &tx_state
)) {
532 sirfsoc_uart_insert_rx_buf_to_tty(sirfport
,
533 SIRFSOC_RX_DMA_BUF_SIZE
);
534 sirfport
->rx_completed
++;
535 sirfport
->rx_completed
%= SIRFSOC_RX_LOOP_BUF_CNT
;
537 count
= CIRC_CNT(sirfport
->rx_dma_items
[sirfport
->rx_issued
].xmit
.head
,
538 sirfport
->rx_dma_items
[sirfport
->rx_issued
].xmit
.tail
,
539 SIRFSOC_RX_DMA_BUF_SIZE
);
541 sirfsoc_uart_insert_rx_buf_to_tty(sirfport
, count
);
542 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
543 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) |
545 sirfsoc_uart_pio_rx_chars(port
, 4 - sirfport
->rx_io_count
);
546 spin_unlock_irqrestore(&sirfport
->rx_lock
, flags
);
547 if (sirfport
->rx_io_count
== 4) {
548 spin_lock_irqsave(&sirfport
->rx_lock
, flags
);
549 sirfport
->rx_io_count
= 0;
550 wr_regl(port
, ureg
->sirfsoc_int_st_reg
,
551 uint_st
->sirfsoc_rx_done
);
552 if (!sirfport
->is_marco
)
553 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
554 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
555 ~(uint_en
->sirfsoc_rx_done_en
));
557 wr_regl(port
, SIRFUART_INT_EN_CLR
,
558 uint_en
->sirfsoc_rx_done_en
);
559 spin_unlock_irqrestore(&sirfport
->rx_lock
, flags
);
561 sirfsoc_uart_start_next_rx_dma(port
);
563 spin_lock_irqsave(&sirfport
->rx_lock
, flags
);
564 wr_regl(port
, ureg
->sirfsoc_int_st_reg
,
565 uint_st
->sirfsoc_rx_done
);
566 if (!sirfport
->is_marco
)
567 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
568 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
569 (uint_en
->sirfsoc_rx_done_en
));
571 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
572 uint_en
->sirfsoc_rx_done_en
);
573 spin_unlock_irqrestore(&sirfport
->rx_lock
, flags
);
577 static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port
*sirfport
)
579 struct uart_port
*port
= &sirfport
->port
;
580 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
581 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
582 struct dma_tx_state tx_state
;
583 spin_lock(&sirfport
->rx_lock
);
585 dmaengine_tx_status(sirfport
->rx_dma_chan
,
586 sirfport
->rx_dma_items
[sirfport
->rx_issued
].cookie
, &tx_state
);
587 dmaengine_terminate_all(sirfport
->rx_dma_chan
);
588 sirfport
->rx_dma_items
[sirfport
->rx_issued
].xmit
.head
=
589 SIRFSOC_RX_DMA_BUF_SIZE
- tx_state
.residue
;
590 if (!sirfport
->is_marco
)
591 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
592 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
593 ~(uint_en
->sirfsoc_rx_timeout_en
));
595 wr_regl(port
, SIRFUART_INT_EN_CLR
,
596 uint_en
->sirfsoc_rx_timeout_en
);
597 spin_unlock(&sirfport
->rx_lock
);
598 tasklet_schedule(&sirfport
->rx_tmo_process_tasklet
);
601 static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port
*sirfport
)
603 struct uart_port
*port
= &sirfport
->port
;
604 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
605 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
606 struct sirfsoc_int_status
*uint_st
= &sirfport
->uart_reg
->uart_int_st
;
608 sirfsoc_uart_pio_rx_chars(port
, 4 - sirfport
->rx_io_count
);
609 if (sirfport
->rx_io_count
== 4) {
610 sirfport
->rx_io_count
= 0;
611 if (!sirfport
->is_marco
)
612 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
613 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
614 ~(uint_en
->sirfsoc_rx_done_en
));
616 wr_regl(port
, SIRFUART_INT_EN_CLR
,
617 uint_en
->sirfsoc_rx_done_en
);
618 wr_regl(port
, ureg
->sirfsoc_int_st_reg
,
619 uint_st
->sirfsoc_rx_timeout
);
620 sirfsoc_uart_start_next_rx_dma(port
);
624 static irqreturn_t
sirfsoc_uart_isr(int irq
, void *dev_id
)
626 unsigned long intr_status
;
627 unsigned long cts_status
;
628 unsigned long flag
= TTY_NORMAL
;
629 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)dev_id
;
630 struct uart_port
*port
= &sirfport
->port
;
631 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
632 struct sirfsoc_fifo_status
*ufifo_st
= &sirfport
->uart_reg
->fifo_status
;
633 struct sirfsoc_int_status
*uint_st
= &sirfport
->uart_reg
->uart_int_st
;
634 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
635 struct uart_state
*state
= port
->state
;
636 struct circ_buf
*xmit
= &port
->state
->xmit
;
637 spin_lock(&port
->lock
);
638 intr_status
= rd_regl(port
, ureg
->sirfsoc_int_st_reg
);
639 wr_regl(port
, ureg
->sirfsoc_int_st_reg
, intr_status
);
640 intr_status
&= rd_regl(port
, ureg
->sirfsoc_int_en_reg
);
641 if (unlikely(intr_status
& (SIRFUART_ERR_INT_STAT(port
, uint_st
)))) {
642 if (intr_status
& uint_st
->sirfsoc_rxd_brk
) {
644 if (uart_handle_break(port
))
647 if (intr_status
& uint_st
->sirfsoc_rx_oflow
)
648 port
->icount
.overrun
++;
649 if (intr_status
& uint_st
->sirfsoc_frm_err
) {
650 port
->icount
.frame
++;
653 if (intr_status
& uint_st
->sirfsoc_parity_err
)
655 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
656 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
657 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_START
);
658 intr_status
&= port
->read_status_mask
;
659 uart_insert_char(port
, intr_status
,
660 uint_en
->sirfsoc_rx_oflow_en
, 0, flag
);
661 tty_flip_buffer_push(&state
->port
);
664 if ((sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) &&
665 (intr_status
& SIRFUART_CTS_INT_ST(uint_st
)) &&
666 !sirfport
->tx_dma_state
) {
667 cts_status
= rd_regl(port
, ureg
->sirfsoc_afc_ctrl
) &
668 SIRFUART_AFC_CTS_STATUS
;
673 uart_handle_cts_change(port
, cts_status
);
674 wake_up_interruptible(&state
->port
.delta_msr_wait
);
676 if (IS_DMA_CHAN_VALID(sirfport
->rx_dma_no
)) {
677 if (intr_status
& uint_st
->sirfsoc_rx_timeout
)
678 sirfsoc_uart_handle_rx_tmo(sirfport
);
679 if (intr_status
& uint_st
->sirfsoc_rx_done
)
680 sirfsoc_uart_handle_rx_done(sirfport
);
682 if (intr_status
& SIRFUART_RX_IO_INT_ST(uint_st
))
683 sirfsoc_uart_pio_rx_chars(port
,
684 SIRFSOC_UART_IO_RX_MAX_CNT
);
686 if (intr_status
& uint_st
->sirfsoc_txfifo_empty
) {
687 if (IS_DMA_CHAN_VALID(sirfport
->tx_dma_no
))
688 sirfsoc_uart_tx_with_dma(sirfport
);
690 if (uart_circ_empty(xmit
) || uart_tx_stopped(port
)) {
691 spin_unlock(&port
->lock
);
694 sirfsoc_uart_pio_tx_chars(sirfport
,
695 SIRFSOC_UART_IO_TX_REASONABLE_CNT
);
696 if ((uart_circ_empty(xmit
)) &&
697 (rd_regl(port
, ureg
->sirfsoc_tx_fifo_status
) &
698 ufifo_st
->ff_empty(port
->line
)))
699 sirfsoc_uart_stop_tx(port
);
703 spin_unlock(&port
->lock
);
707 static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param
)
709 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
710 struct uart_port
*port
= &sirfport
->port
;
711 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
712 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
714 struct dma_tx_state tx_state
;
715 spin_lock_irqsave(&sirfport
->rx_lock
, flags
);
716 while (DMA_COMPLETE
== dmaengine_tx_status(sirfport
->rx_dma_chan
,
717 sirfport
->rx_dma_items
[sirfport
->rx_completed
].cookie
, &tx_state
)) {
718 sirfsoc_uart_insert_rx_buf_to_tty(sirfport
,
719 SIRFSOC_RX_DMA_BUF_SIZE
);
720 if (rd_regl(port
, ureg
->sirfsoc_int_en_reg
) &
721 uint_en
->sirfsoc_rx_timeout_en
)
722 sirfsoc_rx_submit_one_dma_desc(port
,
723 sirfport
->rx_completed
++);
725 sirfport
->rx_completed
++;
726 sirfport
->rx_completed
%= SIRFSOC_RX_LOOP_BUF_CNT
;
728 spin_unlock_irqrestore(&sirfport
->rx_lock
, flags
);
/*
 * sirfsoc_uart_rx_dma_complete_callback() - dmaengine completion callback
 * for one RX ring buffer.  Runs in callback context, so it only advances
 * the rx_issued index (modulo SIRFSOC_RX_LOOP_BUF_CNT) under rx_lock and
 * defers the actual tty work to the rx_dma_complete tasklet.
 */
731 static void sirfsoc_uart_rx_dma_complete_callback(void *param
)
733 struct sirfsoc_uart_port
*sirfport
= (struct sirfsoc_uart_port
*)param
;
734 spin_lock(&sirfport
->rx_lock
);
735 sirfport
->rx_issued
++;
736 sirfport
->rx_issued
%= SIRFSOC_RX_LOOP_BUF_CNT
;
737 spin_unlock(&sirfport
->rx_lock
);
738 tasklet_schedule(&sirfport
->rx_dma_complete_tasklet
);
741 /* submit rx dma task into dmaengine */
742 static void sirfsoc_uart_start_next_rx_dma(struct uart_port
*port
)
744 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
745 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
746 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
749 spin_lock_irqsave(&sirfport
->rx_lock
, flags
);
750 sirfport
->rx_io_count
= 0;
751 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
,
752 rd_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
) &
754 spin_unlock_irqrestore(&sirfport
->rx_lock
, flags
);
755 for (i
= 0; i
< SIRFSOC_RX_LOOP_BUF_CNT
; i
++)
756 sirfsoc_rx_submit_one_dma_desc(port
, i
);
757 sirfport
->rx_completed
= sirfport
->rx_issued
= 0;
758 spin_lock_irqsave(&sirfport
->rx_lock
, flags
);
759 if (!sirfport
->is_marco
)
760 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
761 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
762 SIRFUART_RX_DMA_INT_EN(port
, uint_en
));
764 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
765 SIRFUART_RX_DMA_INT_EN(port
, uint_en
));
766 spin_unlock_irqrestore(&sirfport
->rx_lock
, flags
);
769 static void sirfsoc_uart_start_rx(struct uart_port
*port
)
771 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
772 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
773 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
775 sirfport
->rx_io_count
= 0;
776 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_RESET
);
777 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, 0);
778 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_START
);
779 if (IS_DMA_CHAN_VALID(sirfport
->rx_dma_no
))
780 sirfsoc_uart_start_next_rx_dma(port
);
782 if (!sirfport
->is_marco
)
783 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
784 rd_regl(port
, ureg
->sirfsoc_int_en_reg
) |
785 SIRFUART_RX_IO_INT_EN(port
, uint_en
));
787 wr_regl(port
, ureg
->sirfsoc_int_en_reg
,
788 SIRFUART_RX_IO_INT_EN(port
, uint_en
));
/*
 * sirfsoc_usp_calc_sample_div() - search the USP sample divisor range
 * [SIRF_MIN_SAMPLE_DIV, SIRF_MAX_SAMPLE_DIV] for the divisor that
 * minimizes the rounding error between ioclk_rate and the closest
 * achievable multiple of set_rate * sample_div.  The best sample divisor
 * is written through *sample_reg; the matching clock divisor is computed
 * into ioclk_div (rounded to nearest, rejected when it exceeds
 * SIRF_IOCLK_DIV_MAX — the 'continue' line was dropped by the
 * extraction).
 * NOTE(review): the return statement is not visible — presumably
 * ioclk_div is returned; confirm against the full source.
 */
793 sirfsoc_usp_calc_sample_div(unsigned long set_rate
,
794 unsigned long ioclk_rate
, unsigned long *sample_reg
)
796 unsigned long min_delta
= ~0UL;
797 unsigned short sample_div
;
798 unsigned long ioclk_div
= 0;
799 unsigned long temp_delta
;
801 for (sample_div
= SIRF_MIN_SAMPLE_DIV
;
802 sample_div
<= SIRF_MAX_SAMPLE_DIV
; sample_div
++) {
/* Error of ioclk vs the nearest multiple of set_rate*sample_div. */
803 temp_delta
= ioclk_rate
-
804 (ioclk_rate
+ (set_rate
* sample_div
) / 2)
805 / (set_rate
* sample_div
) * set_rate
* sample_div
;
807 temp_delta
= (temp_delta
> 0) ? temp_delta
: -temp_delta
;
808 if (temp_delta
< min_delta
) {
/* Clock divisor rounded to nearest for this sample_div. */
809 ioclk_div
= (2 * ioclk_rate
/
810 (set_rate
* sample_div
) + 1) / 2 - 1;
811 if (ioclk_div
> SIRF_IOCLK_DIV_MAX
)
813 min_delta
= temp_delta
;
814 *sample_reg
= sample_div
;
/*
 * sirfsoc_uart_calc_sample_div() - for the real UART, search sample
 * divisors in [SIRF_MIN_SAMPLE_DIV, SIRF_MAX_SAMPLE_DIV] for the
 * (ioclk_div, sample_div) pair whose achievable baud rate is closest to
 * baud_rate.  The winning pair is packed into 'regv' using
 * SIRF_IOCLK_DIV_MASK / SIRF_SAMPLE_DIV_MASK with the sample divisor
 * shifted by SIRF_SAMPLE_DIV_SHIFT, and the achieved baud rate is
 * written through *set_baud.
 * NOTE(review): the declaration of 'temp_delta' and the return statement
 * were dropped by the extraction — presumably regv is returned; confirm.
 */
823 sirfsoc_uart_calc_sample_div(unsigned long baud_rate
,
824 unsigned long ioclk_rate
, unsigned long *set_baud
)
826 unsigned long min_delta
= ~0UL;
827 unsigned short sample_div
;
828 unsigned int regv
= 0;
829 unsigned long ioclk_div
;
830 unsigned long baud_tmp
;
833 for (sample_div
= SIRF_MIN_SAMPLE_DIV
;
834 sample_div
<= SIRF_MAX_SAMPLE_DIV
; sample_div
++) {
835 ioclk_div
= (ioclk_rate
/ (baud_rate
* (sample_div
+ 1))) - 1;
/* Skip divisors the hardware field cannot hold. */
836 if (ioclk_div
> SIRF_IOCLK_DIV_MAX
)
/* Baud actually produced by this divisor pair. */
838 baud_tmp
= ioclk_rate
/ ((ioclk_div
+ 1) * (sample_div
+ 1));
839 temp_delta
= baud_tmp
- baud_rate
;
840 temp_delta
= (temp_delta
> 0) ? temp_delta
: -temp_delta
;
841 if (temp_delta
< min_delta
) {
/* Pack the new best divisor pair into the register value. */
842 regv
= regv
& (~SIRF_IOCLK_DIV_MASK
);
843 regv
= regv
| ioclk_div
;
844 regv
= regv
& (~SIRF_SAMPLE_DIV_MASK
);
845 regv
= regv
| (sample_div
<< SIRF_SAMPLE_DIV_SHIFT
);
846 min_delta
= temp_delta
;
847 *set_baud
= baud_tmp
;
853 static void sirfsoc_uart_set_termios(struct uart_port
*port
,
854 struct ktermios
*termios
,
855 struct ktermios
*old
)
857 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
858 struct sirfsoc_register
*ureg
= &sirfport
->uart_reg
->uart_reg
;
859 struct sirfsoc_int_en
*uint_en
= &sirfport
->uart_reg
->uart_int_en
;
860 unsigned long config_reg
= 0;
861 unsigned long baud_rate
;
862 unsigned long set_baud
;
865 unsigned int clk_div_reg
= 0;
866 unsigned long txfifo_op_reg
, ioclk_rate
;
867 unsigned long rx_time_out
;
869 u32 data_bit_len
, stop_bit_len
, len_val
;
870 unsigned long sample_div_reg
= 0xf;
871 ioclk_rate
= port
->uartclk
;
873 switch (termios
->c_cflag
& CSIZE
) {
877 config_reg
|= SIRFUART_DATA_BIT_LEN_8
;
881 config_reg
|= SIRFUART_DATA_BIT_LEN_7
;
885 config_reg
|= SIRFUART_DATA_BIT_LEN_6
;
889 config_reg
|= SIRFUART_DATA_BIT_LEN_5
;
892 if (termios
->c_cflag
& CSTOPB
) {
893 config_reg
|= SIRFUART_STOP_BIT_LEN_2
;
898 spin_lock_irqsave(&port
->lock
, flags
);
899 port
->read_status_mask
= uint_en
->sirfsoc_rx_oflow_en
;
900 port
->ignore_status_mask
= 0;
901 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
902 if (termios
->c_iflag
& INPCK
)
903 port
->read_status_mask
|= uint_en
->sirfsoc_frm_err_en
|
904 uint_en
->sirfsoc_parity_err_en
;
906 if (termios
->c_iflag
& INPCK
)
907 port
->read_status_mask
|= uint_en
->sirfsoc_frm_err_en
;
909 if (termios
->c_iflag
& (BRKINT
| PARMRK
))
910 port
->read_status_mask
|= uint_en
->sirfsoc_rxd_brk_en
;
911 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
912 if (termios
->c_iflag
& IGNPAR
)
913 port
->ignore_status_mask
|=
914 uint_en
->sirfsoc_frm_err_en
|
915 uint_en
->sirfsoc_parity_err_en
;
916 if (termios
->c_cflag
& PARENB
) {
917 if (termios
->c_cflag
& CMSPAR
) {
918 if (termios
->c_cflag
& PARODD
)
919 config_reg
|= SIRFUART_STICK_BIT_MARK
;
921 config_reg
|= SIRFUART_STICK_BIT_SPACE
;
922 } else if (termios
->c_cflag
& PARODD
) {
923 config_reg
|= SIRFUART_STICK_BIT_ODD
;
925 config_reg
|= SIRFUART_STICK_BIT_EVEN
;
929 if (termios
->c_iflag
& IGNPAR
)
930 port
->ignore_status_mask
|=
931 uint_en
->sirfsoc_frm_err_en
;
932 if (termios
->c_cflag
& PARENB
)
934 "USP-UART not support parity err\n");
936 if (termios
->c_iflag
& IGNBRK
) {
937 port
->ignore_status_mask
|=
938 uint_en
->sirfsoc_rxd_brk_en
;
939 if (termios
->c_iflag
& IGNPAR
)
940 port
->ignore_status_mask
|=
941 uint_en
->sirfsoc_rx_oflow_en
;
943 if ((termios
->c_cflag
& CREAD
) == 0)
944 port
->ignore_status_mask
|= SIRFUART_DUMMY_READ
;
945 /* Hardware Flow Control Settings */
946 if (UART_ENABLE_MS(port
, termios
->c_cflag
)) {
947 if (!sirfport
->ms_enabled
)
948 sirfsoc_uart_enable_ms(port
);
950 if (sirfport
->ms_enabled
)
951 sirfsoc_uart_disable_ms(port
);
953 baud_rate
= uart_get_baud_rate(port
, termios
, old
, 0, 4000000);
954 if (ioclk_rate
== 150000000) {
955 for (ic
= 0; ic
< SIRF_BAUD_RATE_SUPPORT_NR
; ic
++)
956 if (baud_rate
== baudrate_to_regv
[ic
].baud_rate
)
957 clk_div_reg
= baudrate_to_regv
[ic
].reg_val
;
959 set_baud
= baud_rate
;
960 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
961 if (unlikely(clk_div_reg
== 0))
962 clk_div_reg
= sirfsoc_uart_calc_sample_div(baud_rate
,
963 ioclk_rate
, &set_baud
);
964 wr_regl(port
, ureg
->sirfsoc_divisor
, clk_div_reg
);
966 clk_div_reg
= sirfsoc_usp_calc_sample_div(baud_rate
,
967 ioclk_rate
, &sample_div_reg
);
969 set_baud
= ((ioclk_rate
/ (clk_div_reg
+1) - 1) /
970 (sample_div_reg
+ 1));
971 /* setting usp mode 2 */
972 len_val
= ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET
) |
973 (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET
));
974 len_val
|= ((clk_div_reg
& SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK
)
975 << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET
);
976 wr_regl(port
, ureg
->sirfsoc_mode2
, len_val
);
978 if (tty_termios_baud_rate(termios
))
979 tty_termios_encode_baud_rate(termios
, set_baud
, set_baud
);
980 /* set receive timeout && data bits len */
981 rx_time_out
= SIRFSOC_UART_RX_TIMEOUT(set_baud
, 20000);
982 rx_time_out
= SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out
);
983 txfifo_op_reg
= rd_regl(port
, ureg
->sirfsoc_tx_fifo_op
);
984 wr_regl(port
, ureg
->sirfsoc_rx_fifo_op
, SIRFUART_FIFO_STOP
);
985 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
,
986 (txfifo_op_reg
& ~SIRFUART_FIFO_START
));
987 if (sirfport
->uart_reg
->uart_type
== SIRF_REAL_UART
) {
988 config_reg
|= SIRFUART_RECV_TIMEOUT(port
, rx_time_out
);
989 wr_regl(port
, ureg
->sirfsoc_line_ctrl
, config_reg
);
992 len_val
= (data_bit_len
- 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET
;
993 len_val
|= (data_bit_len
+ 1 + stop_bit_len
- 1) <<
994 SIRFSOC_USP_TX_FRAME_LEN_OFFSET
;
995 len_val
|= ((data_bit_len
- 1) <<
996 SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET
);
997 len_val
|= (((clk_div_reg
& 0xc00) >> 10) <<
998 SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET
);
999 wr_regl(port
, ureg
->sirfsoc_tx_frame_ctrl
, len_val
);
1001 len_val
= (data_bit_len
- 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET
;
1002 len_val
|= (data_bit_len
+ 1 + stop_bit_len
- 1) <<
1003 SIRFSOC_USP_RX_FRAME_LEN_OFFSET
;
1004 len_val
|= (data_bit_len
- 1) <<
1005 SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET
;
1006 len_val
|= (((clk_div_reg
& 0xf000) >> 12) <<
1007 SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET
);
1008 wr_regl(port
, ureg
->sirfsoc_rx_frame_ctrl
, len_val
);
1010 wr_regl(port
, ureg
->sirfsoc_async_param_reg
,
1011 (SIRFUART_RECV_TIMEOUT(port
, rx_time_out
)) |
1012 (sample_div_reg
& SIRFSOC_USP_ASYNC_DIV2_MASK
) <<
1013 SIRFSOC_USP_ASYNC_DIV2_OFFSET
);
1015 if (IS_DMA_CHAN_VALID(sirfport
->tx_dma_no
))
1016 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
, SIRFUART_DMA_MODE
);
1018 wr_regl(port
, ureg
->sirfsoc_tx_dma_io_ctrl
, SIRFUART_IO_MODE
);
1019 if (IS_DMA_CHAN_VALID(sirfport
->rx_dma_no
))
1020 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
, SIRFUART_DMA_MODE
);
1022 wr_regl(port
, ureg
->sirfsoc_rx_dma_io_ctrl
, SIRFUART_IO_MODE
);
1023 /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
1024 if (set_baud
< 1000000)
1028 wr_regl(port
, ureg
->sirfsoc_tx_fifo_ctrl
,
1029 SIRFUART_FIFO_THD(port
) / threshold_div
);
1030 wr_regl(port
, ureg
->sirfsoc_rx_fifo_ctrl
,
1031 SIRFUART_FIFO_THD(port
) / threshold_div
);
1032 txfifo_op_reg
|= SIRFUART_FIFO_START
;
1033 wr_regl(port
, ureg
->sirfsoc_tx_fifo_op
, txfifo_op_reg
);
1034 uart_update_timeout(port
, termios
->c_cflag
, set_baud
);
1035 sirfsoc_uart_start_rx(port
);
1036 wr_regl(port
, ureg
->sirfsoc_tx_rx_en
, SIRFUART_TX_EN
| SIRFUART_RX_EN
);
1037 spin_unlock_irqrestore(&port
->lock
, flags
);
/*
 * Serial core ->pm callback: gate the port's functional clock.
 * The serial core passes state == 0 for "power on"; any non-zero
 * state powers the port down.
 */
static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
			unsigned int oldstate)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	if (!state)
		clk_prepare_enable(sirfport->clk);
	else
		clk_disable_unprepare(sirfport->clk);
}
1050 static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port
*port
)
1052 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1053 dma_cap_mask_t dma_mask
;
1054 struct dma_slave_config tx_slv_cfg
= {
1058 dma_cap_zero(dma_mask
);
1059 dma_cap_set(DMA_SLAVE
, dma_mask
);
1060 sirfport
->tx_dma_chan
= dma_request_channel(dma_mask
,
1061 (dma_filter_fn
)sirfsoc_dma_filter_id
,
1062 (void *)sirfport
->tx_dma_no
);
1063 if (!sirfport
->tx_dma_chan
) {
1064 dev_err(port
->dev
, "Uart Request Dma Channel Fail %d\n",
1065 sirfport
->tx_dma_no
);
1066 return -EPROBE_DEFER
;
1068 dmaengine_slave_config(sirfport
->tx_dma_chan
, &tx_slv_cfg
);
1073 static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port
*port
)
1075 struct sirfsoc_uart_port
*sirfport
= to_sirfport(port
);
1076 dma_cap_mask_t dma_mask
;
1079 struct dma_slave_config slv_cfg
= {
1083 dma_cap_zero(dma_mask
);
1084 dma_cap_set(DMA_SLAVE
, dma_mask
);
1085 sirfport
->rx_dma_chan
= dma_request_channel(dma_mask
,
1086 (dma_filter_fn
)sirfsoc_dma_filter_id
,
1087 (void *)sirfport
->rx_dma_no
);
1088 if (!sirfport
->rx_dma_chan
) {
1089 dev_err(port
->dev
, "Uart Request Dma Channel Fail %d\n",
1090 sirfport
->rx_dma_no
);
1091 ret
= -EPROBE_DEFER
;
1094 for (i
= 0; i
< SIRFSOC_RX_LOOP_BUF_CNT
; i
++) {
1095 sirfport
->rx_dma_items
[i
].xmit
.buf
=
1096 dma_alloc_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1097 &sirfport
->rx_dma_items
[i
].dma_addr
, GFP_KERNEL
);
1098 if (!sirfport
->rx_dma_items
[i
].xmit
.buf
) {
1099 dev_err(port
->dev
, "Uart alloc bufa failed\n");
1101 goto alloc_coherent_err
;
1103 sirfport
->rx_dma_items
[i
].xmit
.head
=
1104 sirfport
->rx_dma_items
[i
].xmit
.tail
= 0;
1106 dmaengine_slave_config(sirfport
->rx_dma_chan
, &slv_cfg
);
1110 for (j
= 0; j
< i
; j
++)
1111 dma_free_coherent(port
->dev
, SIRFSOC_RX_DMA_BUF_SIZE
,
1112 sirfport
->rx_dma_items
[j
].xmit
.buf
,
1113 sirfport
->rx_dma_items
[j
].dma_addr
);
1114 dma_release_channel(sirfport
->rx_dma_chan
);
1119 static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port
*sirfport
)
1121 dmaengine_terminate_all(sirfport
->tx_dma_chan
);
1122 dma_release_channel(sirfport
->tx_dma_chan
);
/*
 * Tear down the RX DMA channel and free the coherent loop buffers
 * that sirfsoc_uart_init_rx_dma() allocated.  Only called after a
 * fully successful init, so all SIRFSOC_RX_LOOP_BUF_CNT buffers exist.
 */
static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
{
	int i;
	struct uart_port *port = &sirfport->port;
	dmaengine_terminate_all(sirfport->rx_dma_chan);
	dma_release_channel(sirfport->rx_dma_chan);
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
				sirfport->rx_dma_items[i].xmit.buf,
				sirfport->rx_dma_items[i].dma_addr);
}
/*
 * Serial core ->startup: runs on first open of the port.
 *
 * Requests the UART IRQ (left disabled until setup completes), resets
 * the TX/RX FIFOs and DMA I/O registers to a known state, initializes
 * RX/TX DMA when valid channel numbers came from the device tree, and
 * for USP ports with hardware flow control hooks the CTS gpio as an
 * edge-triggered interrupt.
 *
 * Returns 0 on success or a negative errno; the port IRQ acquired
 * earlier is released on the error path.
 */
static int sirfsoc_uart_startup(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	unsigned int index = port->line;
	int ret;
	/* keep the IRQ masked until the hardware is fully initialized */
	set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
	ret = request_irq(port->irq,
				sirfsoc_uart_isr,
				0,
				SIRFUART_PORT_NAME,
				sirfport);
	if (ret != 0) {
		dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
							index, port->irq);
		goto irq_err;
	}

	/* initial hardware settings: default both directions to PIO mode */
	wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
		SIRFUART_IO_MODE);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
		SIRFUART_IO_MODE);
	wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
		wr_regl(port, ureg->sirfsoc_mode1,
			SIRFSOC_USP_ENDIAN_CTRL_LSBF |
			SIRFSOC_USP_EN);
	/* pulse the FIFO reset bits, then return to normal operation */
	wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));

	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
		ret = sirfsoc_uart_init_rx_dma(port);
		if (ret)
			goto init_rx_err;
		wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
				SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
				SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
				SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
	}
	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
		/* NOTE(review): init_tx_dma return value is ignored here;
		 * TX silently falls back to PIO if it fails - confirm intended */
		sirfsoc_uart_init_tx_dma(port);
		sirfport->tx_dma_state = TX_DMA_IDLE;
		wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
				SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
				SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
				SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
	}
	sirfport->ms_enabled = false;
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
		sirfport->hw_flow_ctrl) {
		/* CTS changes arrive as gpio edge interrupts on USP ports */
		set_irq_flags(gpio_to_irq(sirfport->cts_gpio),
			IRQF_VALID | IRQF_NOAUTOEN);
		ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
			sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
			IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
		if (ret != 0) {
			dev_err(port->dev, "UART-USP:request gpio irq fail\n");
			goto init_rx_err;
		}
	}

	enable_irq(port->irq);

	return 0;
init_rx_err:
	free_irq(port->irq, sirfport);
irq_err:
	return ret;
}
/*
 * Serial core ->shutdown: reverse of startup, runs on last close.
 * Masks interrupts, releases IRQs, disables modem-status tracking
 * and tears down any DMA resources acquired in startup.
 */
static void sirfsoc_uart_shutdown(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
	else
		/* marco masks interrupts through a write-to-clear register */
		wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);

	free_irq(port->irq, sirfport);
	if (sirfport->ms_enabled)
		sirfsoc_uart_disable_ms(port);
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
			sirfport->hw_flow_ctrl) {
		/* NOTE(review): drives RTS gpio high on close - presumably
		 * deasserting flow control; confirm wire polarity */
		gpio_set_value(sirfport->rts_gpio, 1);
		free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
	}
	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
		sirfsoc_uart_uninit_rx_dma(sirfport);
	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
		sirfsoc_uart_uninit_tx_dma(sirfport);
		sirfport->tx_dma_state = TX_DMA_IDLE;
	}
}
1241 static const char *sirfsoc_uart_type(struct uart_port
*port
)
1243 return port
->type
== SIRFSOC_PORT_TYPE
? SIRFUART_PORT_NAME
: NULL
;
/*
 * Serial core ->request_port: claim the port's MMIO region.
 * request_mem_region() returns a pointer (NULL on failure), so a
 * non-NULL result maps to success (0), NULL to -EBUSY.
 */
static int sirfsoc_uart_request_port(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
	void *ret;
	ret = request_mem_region(port->mapbase,
		SIRFUART_MAP_SIZE, uart_param->port_name);
	return ret ? 0 : -EBUSY;
}
/* Serial core ->release_port: drop the MMIO region claimed in request_port. */
static void sirfsoc_uart_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
}
1261 static void sirfsoc_uart_config_port(struct uart_port
*port
, int flags
)
1263 if (flags
& UART_CONFIG_TYPE
) {
1264 port
->type
= SIRFSOC_PORT_TYPE
;
1265 sirfsoc_uart_request_port(port
);
/*
 * uart_ops shared by all sirfsoc ports (real UART and USP alike);
 * callbacks not listed here fall back to serial-core defaults.
 */
static struct uart_ops sirfsoc_uart_ops = {
	.tx_empty	= sirfsoc_uart_tx_empty,
	.get_mctrl	= sirfsoc_uart_get_mctrl,
	.set_mctrl	= sirfsoc_uart_set_mctrl,
	.stop_tx	= sirfsoc_uart_stop_tx,
	.start_tx	= sirfsoc_uart_start_tx,
	.stop_rx	= sirfsoc_uart_stop_rx,
	.enable_ms	= sirfsoc_uart_enable_ms,
	.break_ctl	= sirfsoc_uart_break_ctl,
	.startup	= sirfsoc_uart_startup,
	.shutdown	= sirfsoc_uart_shutdown,
	.set_termios	= sirfsoc_uart_set_termios,
	.pm		= sirfsoc_uart_pm,
	.type		= sirfsoc_uart_type,
	.release_port	= sirfsoc_uart_release_port,
	.request_port	= sirfsoc_uart_request_port,
	.config_port	= sirfsoc_uart_config_port,
};
1288 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
/*
 * Console ->setup: parse the "console=..." option string and program
 * the port.  Runs early (before the port is opened), so DMA channel
 * numbers are forced invalid - console transfers always use PIO.
 * Returns 0 or a negative errno from validation / uart_set_options().
 */
static int __init
sirfsoc_uart_console_setup(struct console *co, char *options)
{
	unsigned int baud = 115200;	/* defaults when no options given */
	unsigned int bits = 8;
	unsigned int parity = 'n';
	unsigned int flow = 'n';
	struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
		return -EINVAL;

	/* port not probed yet: registers are not mapped */
	if (!port->mapbase)
		return -ENODEV;

	/* enable usp in mode1 register */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
		wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
				SIRFSOC_USP_ENDIAN_CTRL_LSBF);
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	port->cons = co;

	/* default console tx/rx transfer using io mode */
	sirfport->rx_dma_no = UNVALID_DMA_CHAN;
	sirfport->tx_dma_no = UNVALID_DMA_CHAN;
	return uart_set_options(port, co, baud, parity, bits, flow);
}
/*
 * Blocking console putchar: busy-wait until the TX FIFO reports
 * not-full, then write one byte.  No sleeping - safe from the
 * console-write (possibly atomic) context.
 */
static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	while (rd_regl(port,
		ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line))
		cpu_relax();
	wr_regb(port, ureg->sirfsoc_tx_fifo_data, ch);
}
/* Console ->write: route the buffer through the blocking putchar helper. */
static void sirfsoc_uart_console_write(struct console *co, const char *s,
							unsigned int count)
{
	struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
	uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
}
/* Boot console bound to this driver; device name comes from SIRFSOC_UART_NAME. */
static struct console sirfsoc_uart_console = {
	.name		= SIRFSOC_UART_NAME,
	.device		= uart_console_device,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,	/* port index chosen by the console= option */
	.write		= sirfsoc_uart_console_write,
	.setup		= sirfsoc_uart_console_setup,
	.data		= &sirfsoc_uart_drv,
};
/* Register the boot console at console_initcall time. */
static int __init sirfsoc_uart_console_init(void)
{
	register_console(&sirfsoc_uart_console);
	return 0;
}
console_initcall(sirfsoc_uart_console_init);
/*
 * The uart_driver shared by all ports; registered in module init and
 * referenced from probe/remove and the suspend/resume callbacks.
 */
static struct uart_driver sirfsoc_uart_drv = {
	.owner		= THIS_MODULE,
	.driver_name	= SIRFUART_PORT_NAME,
	.nr		= SIRFSOC_UART_NR,
	.dev_name	= SIRFSOC_UART_NAME,
	.major		= SIRFSOC_UART_MAJOR,
	.minor		= SIRFSOC_UART_MINOR,
#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
	.cons		= &sirfsoc_uart_console,
#endif
};
/*
 * OF match table: prima2 and marco real UARTs share one register
 * layout (&sirfsoc_uart); the USP-based uart has its own (&sirfsoc_usp).
 */
static struct of_device_id sirfsoc_uart_ids[] = {
	{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
	{ .compatible = "sirf,marco-uart", .data = &sirfsoc_uart},
	{ .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
	{}
};
MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
/*
 * Platform ->probe: bind a DT uart/usp node to one of the static
 * sirfsoc_uart_ports slots (indexed by the node's "cell-index"
 * property, with USP ports offset past the real UARTs), read DMA
 * channel numbers and flow-control gpios, map the register window,
 * fetch the clock and register the port with the serial core.
 * Returns 0 or a negative errno.
 */
static int sirfsoc_uart_probe(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	struct resource *res;
	int ret;
	const struct of_device_id *match;

	match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
	if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
		dev_err(&pdev->dev,
			"Unable to find cell-index in uart node.\n");
		ret = -EFAULT;
		goto err;
	}
	/* USP ports are numbered after the real UARTs in the port array */
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
		pdev->id += ((struct sirfsoc_uart_register *)
				match->data)->uart_param.register_uart_nr;
	sirfport = &sirfsoc_uart_ports[pdev->id];
	port = &sirfport->port;
	port->dev = &pdev->dev;
	port->private_data = sirfport;
	sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;

	sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
		"sirf,uart-has-rtscts");
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) {
		sirfport->uart_reg->uart_type = SIRF_REAL_UART;
		/* a missing DMA property means PIO mode for that direction */
		if (of_property_read_u32(pdev->dev.of_node,
				"sirf,uart-dma-rx-channel",
				&sirfport->rx_dma_no))
			sirfport->rx_dma_no = UNVALID_DMA_CHAN;
		if (of_property_read_u32(pdev->dev.of_node,
				"sirf,uart-dma-tx-channel",
				&sirfport->tx_dma_no))
			sirfport->tx_dma_no = UNVALID_DMA_CHAN;
	}
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
		sirfport->uart_reg->uart_type = SIRF_USP_UART;
		if (of_property_read_u32(pdev->dev.of_node,
				"sirf,usp-dma-rx-channel",
				&sirfport->rx_dma_no))
			sirfport->rx_dma_no = UNVALID_DMA_CHAN;
		if (of_property_read_u32(pdev->dev.of_node,
				"sirf,usp-dma-tx-channel",
				&sirfport->tx_dma_no))
			sirfport->tx_dma_no = UNVALID_DMA_CHAN;
		if (!sirfport->hw_flow_ctrl)
			goto usp_no_flow_control;
		if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
			sirfport->cts_gpio = of_get_named_gpio(
					pdev->dev.of_node, "cts-gpios", 0);
		else
			sirfport->cts_gpio = -1;
		if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
			sirfport->rts_gpio = of_get_named_gpio(
					pdev->dev.of_node, "rts-gpios", 0);
		else
			sirfport->rts_gpio = -1;

		/* USP flow control is done in software via these gpios,
		 * so both must be present and valid */
		if ((!gpio_is_valid(sirfport->cts_gpio) ||
			 !gpio_is_valid(sirfport->rts_gpio))) {
			ret = -EINVAL;
			dev_err(&pdev->dev,
				"Usp flow control must have cts and rts gpio");
			goto err;
		}
		ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
				"usp-cts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request cts gpio");
			goto err;
		}
		gpio_direction_input(sirfport->cts_gpio);
		ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
				"usp-rts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request rts gpio");
			goto err;
		}
		gpio_direction_output(sirfport->rts_gpio, 1);
	}
usp_no_flow_control:
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart"))
		sirfport->is_marco = true;

	if (of_property_read_u32(pdev->dev.of_node,
			"fifosize",
			&port->fifosize)) {
		dev_err(&pdev->dev,
			"Unable to find fifosize in uart node.\n");
		ret = -EFAULT;
		goto err;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	spin_lock_init(&sirfport->rx_lock);
	spin_lock_init(&sirfport->tx_lock);
	tasklet_init(&sirfport->rx_dma_complete_tasklet,
			sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
	tasklet_init(&sirfport->rx_tmo_process_tasklet,
			sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
	port->mapbase = res->start;
	port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!port->membase) {
		dev_err(&pdev->dev, "Cannot remap resource.\n");
		ret = -ENOMEM;
		goto err;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	port->irq = res->start;

	/* NOTE(review): clk_get is not devm-managed; the put happens in
	 * remove and on the uart_add_one_port error path below */
	sirfport->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sirfport->clk)) {
		ret = PTR_ERR(sirfport->clk);
		goto err;
	}
	port->uartclk = clk_get_rate(sirfport->clk);

	port->ops = &sirfsoc_uart_ops;
	spin_lock_init(&port->lock);

	platform_set_drvdata(pdev, sirfport);
	ret = uart_add_one_port(&sirfsoc_uart_drv, port);
	if (ret != 0) {
		dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
		goto port_err;
	}

	return 0;

port_err:
	clk_put(sirfport->clk);
err:
	return ret;
}
1524 static int sirfsoc_uart_remove(struct platform_device
*pdev
)
1526 struct sirfsoc_uart_port
*sirfport
= platform_get_drvdata(pdev
);
1527 struct uart_port
*port
= &sirfport
->port
;
1528 clk_put(sirfport
->clk
);
1529 uart_remove_one_port(&sirfsoc_uart_drv
, port
);
1533 #ifdef CONFIG_PM_SLEEP
/* System sleep: let the serial core quiesce and power the port down. */
static int
sirfsoc_uart_suspend(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_suspend_port(&sirfsoc_uart_drv, port);
	return 0;
}
/* System resume: restore the port state saved at suspend. */
static int sirfsoc_uart_resume(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_resume_port(&sirfsoc_uart_drv, port);
	return 0;
}
/* Sleep-only PM ops; no runtime-PM callbacks are provided. */
static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
};
/* Platform driver glue: matched against sirfsoc_uart_ids via the DT. */
static struct platform_driver sirfsoc_uart_driver = {
	.probe		= sirfsoc_uart_probe,
	.remove		= sirfsoc_uart_remove,
	.driver		= {
		.name	= SIRFUART_PORT_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= sirfsoc_uart_ids,
		.pm	= &sirfsoc_uart_pm_ops,
	},
};
/*
 * Module init: register the uart_driver first (the platform probe
 * needs it), then the platform driver; unwind the former if the
 * latter fails.
 */
static int __init sirfsoc_uart_init(void)
{
	int ret = 0;

	ret = uart_register_driver(&sirfsoc_uart_drv);
	if (ret)
		goto out;

	ret = platform_driver_register(&sirfsoc_uart_driver);
	if (ret)
		uart_unregister_driver(&sirfsoc_uart_drv);
out:
	return ret;
}
module_init(sirfsoc_uart_init);
/* Module exit: unregister in reverse order of sirfsoc_uart_init(). */
static void __exit sirfsoc_uart_exit(void)
{
	platform_driver_unregister(&sirfsoc_uart_driver);
	uart_unregister_driver(&sirfsoc_uart_drv);
}
module_exit(sirfsoc_uart_exit);
1590 MODULE_LICENSE("GPL v2");
1591 MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
1592 MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");