/*
 * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
 *
 * Copyright (C) 2002 - 2011  Paul Mundt
 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
 *
 * based off of the old drivers/char/sh-sci.c by:
 *
 *   Copyright (C) 1999, 2000  Niibe Yutaka
 *   Copyright (C) 2000  Sugioka Toshinobu
 *   Modified to support multiple serial ports. Stuart Menefy (May 2000).
 *   Modified to support SecureEdge. David McCullough (2002)
 *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
 *   Removed SH7300 support (Jul 2007).
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/sysrq.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/platform_device.h>
#include <linux/serial_sci.h>
#include <linux/notifier.h>
#include <linux/pm_runtime.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <asm/sh_bios.h>
struct sci_port {
	struct uart_port	port;

	/* Platform configuration */
	struct plat_sci_port	*cfg;

	/* Port enable callback */
	void			(*enable)(struct uart_port *port);

	/* Port disable callback */
	void			(*disable)(struct uart_port *port);

	struct timer_list	break_timer;
	int			break_flag;

	struct clk		*iclk;
	struct clk		*fclk;

	struct dma_chan			*chan_tx;
	struct dma_chan			*chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx[2];
	dma_cookie_t			cookie_tx;
	dma_cookie_t			cookie_rx[2];
	dma_cookie_t			active_rx;
	struct scatterlist		sg_tx;
	unsigned int			sg_len_tx;
	struct scatterlist		sg_rx[2];
	size_t				buf_len_rx;
	struct sh_dmae_slave		param_tx;
	struct sh_dmae_slave		param_rx;
	struct work_struct		work_tx;
	struct work_struct		work_rx;
	struct timer_list		rx_timer;
	unsigned int			rx_timeout;
#endif

	struct notifier_block		freq_transition;
};
/* Function prototypes */
static void sci_start_tx(struct uart_port *port);
static void sci_stop_tx(struct uart_port *port);
static void sci_start_rx(struct uart_port *port);

#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS

static struct sci_port sci_ports[SCI_NPORTS];
static struct uart_driver sci_uart_driver;
static inline struct sci_port *
to_sci_port(struct uart_port *uart)
{
	return container_of(uart, struct sci_port, port);
}
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)

#ifdef CONFIG_CONSOLE_POLL
static int sci_poll_get_char(struct uart_port *port)
{
	unsigned short status;
	int c;

	do {
		status = sci_in(port, SCxSR);
		if (status & SCxSR_ERRORS(port)) {
			sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
			continue;
		}
		break;
	} while (1);

	if (!(status & SCxSR_RDxF(port)))
		return NO_POLL_CHAR;

	c = sci_in(port, SCxRDR);

	/* Dummy read */
	sci_in(port, SCxSR);
	sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

	return c;
}
#endif

static void sci_poll_put_char(struct uart_port *port, unsigned char c)
{
	unsigned short status;

	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TDxE(port)));

	sci_out(port, SCxTDR, c);
	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
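
/*
 * Note on the poll helpers above: both are pure polling I/O with no
 * interrupts involved. sci_poll_put_char() busy-waits on TDxE before
 * writing SCxTDR, while sci_poll_get_char() retries while error flags
 * are pending and returns immediately when no receive data is ready,
 * which is what console-poll/kgdb users expect.
 */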
#if defined(__H8300H__) || defined(__H8300S__)
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	int ch = (port->mapbase - SMR0) >> 3;

	H8300_GPIO_DDR(h8300_sci_pins[ch].port,
		       h8300_sci_pins[ch].rx,
		       H8300_GPIO_INPUT);
	H8300_GPIO_DDR(h8300_sci_pins[ch].port,
		       h8300_sci_pins[ch].tx,
		       H8300_GPIO_OUTPUT);

	H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (port->mapbase == 0xA4400000) {
		__raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
		__raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
	} else if (port->mapbase == 0xA4410000)
		__raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	if (cflag & CRTSCTS) {
		if (port->mapbase == 0xa4430000) { /* SCIF0 */
			/* Clear PTCR bit 9-2; enable all scif pins but sck */
			data = __raw_readw(PORT_PTCR);
			__raw_writew((data & 0xfc03), PORT_PTCR);
		} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
			/* Clear PVCR bit 9-2 */
			data = __raw_readw(PORT_PVCR);
			__raw_writew((data & 0xfc03), PORT_PVCR);
		}
	} else {
		if (port->mapbase == 0xa4430000) { /* SCIF0 */
			/* Clear PTCR bit 5-2; enable only tx and rx */
			data = __raw_readw(PORT_PTCR);
			__raw_writew((data & 0xffc3), PORT_PTCR);
		} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
			/* Clear PVCR bit 5-2 */
			data = __raw_readw(PORT_PVCR);
			__raw_writew((data & 0xffc3), PORT_PVCR);
		}
	}
}
#elif defined(CONFIG_CPU_SH3)
/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	/* We need to set SCPCR to enable RTS/CTS */
	data = __raw_readw(SCPCR);
	/* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0 */
	__raw_writew(data & 0x0fcf, SCPCR);

	if (!(cflag & CRTSCTS)) {
		/* We need to set SCPCR to enable RTS/CTS */
		data = __raw_readw(SCPCR);
		/* Clear out SCP7MD1,0, SCP4MD1,0,
		   Set SCP6MD1,0 = {01} (output) */
		__raw_writew((data & 0x0fcf) | 0x1000, SCPCR);

		data = __raw_readb(SCPDR);
		/* Set /RTS2 (bit6) = 0 */
		__raw_writeb(data & 0xbf, SCPDR);
	}
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	if (port->mapbase == 0xffe00000) {
		data = __raw_readw(PSCR);
		data &= ~0x03cf;
		if (!(cflag & CRTSCTS))
			data |= 0x0340;

		__raw_writew(data, PSCR);
	}
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
	defined(CONFIG_CPU_SUBTYPE_SH7785) || \
	defined(CONFIG_CPU_SUBTYPE_SH7786) || \
	defined(CONFIG_CPU_SUBTYPE_SHX3)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (!(cflag & CRTSCTS))
		__raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
}
#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (!(cflag & CRTSCTS))
		__raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */
}
#else
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
}
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
    defined(CONFIG_CPU_SUBTYPE_SH7785) || \
    defined(CONFIG_CPU_SUBTYPE_SH7786)
static int scif_txfill(struct uart_port *port)
{
	return sci_in(port, SCTFDR) & 0xff;
}

static int scif_txroom(struct uart_port *port)
{
	return SCIF_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	return sci_in(port, SCRFDR) & 0xff;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
static int scif_txfill(struct uart_port *port)
{
	if (port->mapbase == 0xffe00000 ||
	    port->mapbase == 0xffe08000)
		/* SCIF0/1 */
		return sci_in(port, SCTFDR) & 0xff;
	else
		/* SCIF2 */
		return sci_in(port, SCFDR) >> 8;
}

static int scif_txroom(struct uart_port *port)
{
	if (port->mapbase == 0xffe00000 ||
	    port->mapbase == 0xffe08000)
		/* SCIF0/1 */
		return SCIF_TXROOM_MAX - scif_txfill(port);
	else
		/* SCIF2 */
		return SCIF2_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	if ((port->mapbase == 0xffe00000) ||
	    (port->mapbase == 0xffe08000)) {
		/* SCIF0/1 */
		return sci_in(port, SCRFDR) & 0xff;
	} else {
		/* SCIF2 */
		return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
	}
}
#elif defined(CONFIG_ARCH_SH7372)
static int scif_txfill(struct uart_port *port)
{
	if (port->type == PORT_SCIFA)
		return sci_in(port, SCFDR) >> 8;
	else
		return sci_in(port, SCTFDR);
}

static int scif_txroom(struct uart_port *port)
{
	return port->fifosize - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	if (port->type == PORT_SCIFA)
		return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
	else
		return sci_in(port, SCRFDR);
}
#else
static int scif_txfill(struct uart_port *port)
{
	return sci_in(port, SCFDR) >> 8;
}

static int scif_txroom(struct uart_port *port)
{
	return SCIF_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
}
#endif

static int sci_txfill(struct uart_port *port)
{
	return !(sci_in(port, SCxSR) & SCI_TDRE);
}

static int sci_txroom(struct uart_port *port)
{
	return !sci_txfill(port);
}

static int sci_rxfill(struct uart_port *port)
{
	return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
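
/*
 * Summary of the helpers above: *_txfill() reports how many characters are
 * currently in the transmit FIFO, *_txroom() how much space is left in it,
 * and *_rxfill() how many characters are waiting in the receive FIFO. The
 * plain SCI variants can only ever report 0 or 1, since those ports have no
 * FIFO at all.
 */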
/* ********************************************************************** *
 *                   the interrupt related routines                      *
 * ********************************************************************** */

static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		ctrl = sci_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
		return;
	}

	if (port->type == PORT_SCI)
		count = sci_txroom(port);
	else
		count = scif_txroom(port);

	do {
		unsigned char c;

		if (port->x_char) {
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		sci_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit)) {
		sci_stop_tx(port);
	} else {
		ctrl = sci_in(port, SCSCR);

		if (port->type != PORT_SCI) {
			sci_in(port, SCxSR); /* Dummy read */
			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
		}

		ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
	}
}
/* On SH3, SCIF may read end-of-break as a space->mark char */
#define STEPFN(c)  ({int __c = (c); (((__c-1)|(__c)) == -1); })

static void sci_receive_chars(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);
	struct tty_struct *tty = port->state->port.tty;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		if (port->type == PORT_SCI)
			count = sci_rxfill(port);
		else
			count = scif_rxfill(port);

		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tty, count);

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			char c = sci_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c) ||
			    sci_port->break_flag)
				count = 0;
			else
				tty_insert_flip_char(tty, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c = sci_in(port, SCxRDR);
				status = sci_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
				/* Skip "chars" during break */
				if (sci_port->break_flag) {
					if ((c == 0) &&
					    (status & SCxSR_FER(port))) {
						count--; i--;
						continue;
					}

					/* Nonzero => end-of-break */
					dev_dbg(port->dev, "debounce<%02x>\n", c);
					sci_port->break_flag = 0;

					if (STEPFN(c)) {
						count--; i--;
						continue;
					}
				}
#endif /* CONFIG_CPU_SH3 */
				if (uart_handle_sysrq_char(port, c)) {
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
					flag = TTY_PARITY;
					dev_notice(port->dev, "parity error\n");
				} else
					flag = TTY_NORMAL;

				tty_insert_flip_char(tty, c, flag);
			}
		}

		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tty);
	} else {
		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
	}
}
#define SCI_BREAK_JIFFIES (HZ/20)

/*
 * The sci generates interrupts during the break,
 * 1 per millisecond or so during the break period, for 9600 baud.
 * So don't bother disabling interrupts.
 * But don't want more than 1 break event.
 * Use a kernel timer to periodically poll the rx line until
 * the break is finished.
 */
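
/*
 * With SCI_BREAK_JIFFIES = HZ/20 the RX line is re-sampled every 50ms;
 * sci_break_timer() below keeps break_flag non-zero until two consecutive
 * samples have found the line idle again, matching the "two consecutive
 * samples" rule noted above it.
 */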
static inline void sci_schedule_break_timer(struct sci_port *port)
{
	mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
}

/* Ensure that two consecutive samples find the break over. */
static void sci_break_timer(unsigned long data)
{
	struct sci_port *port = (struct sci_port *)data;

	if (port->enable)
		port->enable(&port->port);

	if (sci_rxd_in(&port->port) == 0) {
		port->break_flag = 1;
		sci_schedule_break_timer(port);
	} else if (port->break_flag == 1) {
		/* break is over. */
		port->break_flag = 2;
		sci_schedule_break_timer(port);
	} else
		port->break_flag = 0;

	if (port->disable)
		port->disable(&port->port);
}
static int sci_handle_errors(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = sci_in(port, SCxSR);
	struct tty_struct *tty = port->state->port.tty;

	if (status & SCxSR_ORER(port)) {
		/* overrun error */
		if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
			copied++;

		dev_notice(port->dev, "overrun error");
	}

	if (status & SCxSR_FER(port)) {
		if (sci_rxd_in(port) == 0) {
			/* Notify of BREAK */
			struct sci_port *sci_port = to_sci_port(port);

			if (!sci_port->break_flag) {
				sci_port->break_flag = 1;
				sci_schedule_break_timer(sci_port);

				/* Do sysrq handling. */
				if (uart_handle_break(port))
					return 0;

				dev_dbg(port->dev, "BREAK detected\n");

				if (tty_insert_flip_char(tty, 0, TTY_BREAK))
					copied++;
			}
		} else {
			if (tty_insert_flip_char(tty, 0, TTY_FRAME))
				copied++;

			dev_notice(port->dev, "frame error\n");
		}
	}

	if (status & SCxSR_PER(port)) {
		if (tty_insert_flip_char(tty, 0, TTY_PARITY))
			copied++;

		dev_notice(port->dev, "parity error");
	}

	if (copied)
		tty_flip_buffer_push(tty);

	return copied;
}

static int sci_handle_fifo_overrun(struct uart_port *port)
{
	struct tty_struct *tty = port->state->port.tty;
	int copied = 0;

	if (port->type != PORT_SCIF)
		return 0;

	if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) {
		sci_out(port, SCLSR, 0);

		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		tty_flip_buffer_push(tty);

		dev_notice(port->dev, "overrun error\n");
		copied++;
	}

	return copied;
}

static int sci_handle_breaks(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = sci_in(port, SCxSR);
	struct tty_struct *tty = port->state->port.tty;
	struct sci_port *s = to_sci_port(port);

	if (uart_handle_break(port))
		return 0;

	if (!s->break_flag && status & SCxSR_BRK(port)) {
#if defined(CONFIG_CPU_SH3)
		/* Debounce break */
		s->break_flag = 1;
#endif
		/* Notify of BREAK */
		if (tty_insert_flip_char(tty, 0, TTY_BREAK))
			copied++;

		dev_dbg(port->dev, "BREAK detected\n");
	}

	if (copied)
		tty_flip_buffer_push(tty);

	copied += sci_handle_fifo_overrun(port);

	return copied;
}
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	if (s->chan_rx) {
		u16 scr = sci_in(port, SCSCR);
		u16 ssr = sci_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			disable_irq_nosync(irq);
			scr |= 0x4000;
		} else
			scr &= ~SCSCR_RIE;
		sci_out(port, SCSCR, scr);
		/* Clear current interrupt */
		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
			jiffies, s->rx_timeout);
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

		return IRQ_HANDLED;
	}
#endif

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(ptr);

	return IRQ_HANDLED;
}
static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	sci_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			sci_in(port, SCxSR);
			sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		sci_handle_fifo_overrun(port);
		sci_rx_interrupt(irq, ptr);
	}

	sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}

static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle BREAKs */
	sci_handle_breaks(port);
	sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));

	return IRQ_HANDLED;
}
static inline unsigned long port_rx_irq_mask(struct uart_port *port)
{
	/*
	 * Not all ports (such as SCIFA) will support REIE. Rather than
	 * special-casing the port type, we check the port initialization
	 * IRQ enable mask to see whether the IRQ is desired at all. If
	 * it's unset, it's logically inferred that there's no point in
	 * testing for it.
	 */
	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
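
/*
 * For example, a platform whose plat_sci_port::scscr setting includes
 * SCSCR_REIE gets SCSCR_RIE | SCSCR_REIE back here, while one that leaves
 * REIE out is only ever masked and unmasked with SCSCR_RIE.
 */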
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
	unsigned short ssr_status, scr_status, err_enabled;
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);
	irqreturn_t ret = IRQ_NONE;

	ssr_status = sci_in(port, SCxSR);
	scr_status = sci_in(port, SCSCR);
	err_enabled = scr_status & port_rx_irq_mask(port);

	/* Tx Interrupt */
	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
	    !s->chan_tx)
		ret = sci_tx_interrupt(irq, ptr);

	/*
	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
	 * DR flags
	 */
	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
	    (scr_status & SCSCR_RIE))
		ret = sci_rx_interrupt(irq, ptr);

	/* Error Interrupt */
	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
		ret = sci_er_interrupt(irq, ptr);

	/* Break Interrupt */
	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
		ret = sci_br_interrupt(irq, ptr);

	return ret;
}
/*
 * Here we define a transition notifier so that we can update all of our
 * ports' baud rate when the peripheral clock changes.
 */
static int sci_notifier(struct notifier_block *self,
			unsigned long phase, void *p)
{
	struct sci_port *sci_port;
	unsigned long flags;

	sci_port = container_of(self, struct sci_port, freq_transition);

	if ((phase == CPUFREQ_POSTCHANGE) ||
	    (phase == CPUFREQ_RESUMECHANGE)) {
		struct uart_port *port = &sci_port->port;

		spin_lock_irqsave(&port->lock, flags);
		port->uartclk = clk_get_rate(sci_port->iclk);
		spin_unlock_irqrestore(&port->lock, flags);
	}

	return NOTIFY_OK;
}
static void sci_clk_enable(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);

	pm_runtime_get_sync(port->dev);

	clk_enable(sci_port->iclk);
	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
	clk_enable(sci_port->fclk);
}

static void sci_clk_disable(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);

	clk_disable(sci_port->fclk);
	clk_disable(sci_port->iclk);

	pm_runtime_put_sync(port->dev);
}
static int sci_request_irq(struct sci_port *port)
{
	int i;
	irqreturn_t (*handlers[4])(int irq, void *ptr) = {
		sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt,
		sci_br_interrupt,
	};
	const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
			       "SCI Transmit Data Empty", "SCI Break" };

	if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
		if (unlikely(!port->cfg->irqs[0]))
			return -ENODEV;

		if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
				IRQF_DISABLED, "sci", port)) {
			dev_err(port->port.dev, "Can't allocate IRQ\n");
			return -ENODEV;
		}
	} else {
		for (i = 0; i < ARRAY_SIZE(handlers); i++) {
			if (unlikely(!port->cfg->irqs[i]))
				continue;

			if (request_irq(port->cfg->irqs[i], handlers[i],
					IRQF_DISABLED, desc[i], port)) {
				dev_err(port->port.dev, "Can't allocate IRQ\n");
				return -ENODEV;
			}
		}
	}

	return 0;
}

static void sci_free_irq(struct sci_port *port)
{
	int i;

	if (port->cfg->irqs[0] == port->cfg->irqs[1])
		free_irq(port->cfg->irqs[0], port);
	else {
		for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
			if (!port->cfg->irqs[i])
				continue;

			free_irq(port->cfg->irqs[i], port);
		}
	}
}
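
/*
 * Platforms with a single muxed interrupt pass the same vector in
 * irqs[0] and irqs[1], which makes the comparison above select the one
 * sci_mpxed_interrupt() handler; otherwise each of the ERI/RXI/TXI/BRI
 * lines gets its own dedicated handler from handlers[]/desc[].
 */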
static unsigned int sci_tx_empty(struct uart_port *port)
{
	unsigned short status = sci_in(port, SCxSR);
	unsigned short in_tx_fifo = scif_txfill(port);

	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}

static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* This routine is used for setting signals of: DTR, DCD, CTS/RTS */
	/* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
	/* If you have signals for DTR and DCD, please implement here. */
}

static unsigned int sci_get_mctrl(struct uart_port *port)
{
	/* This routine is used for getting signals of: DTR, DCD, DSR, RI,
	   and CTS/RTS */

	return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
}
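
/*
 * Note on sci_tx_empty(): TIOCSER_TEMT is only reported once TEND is set
 * and the transmit FIFO count has dropped to zero, i.e. the last bit has
 * actually left the shift register, not merely been queued.
 */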
#ifdef CONFIG_SERIAL_SH_SCI_DMA
static void sci_dma_tx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);

	xmit->tail += sg_dma_len(&s->sg_tx);
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += sg_dma_len(&s->sg_tx);

	async_tx_ack(s->desc_tx);
	s->cookie_tx = -EINVAL;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		schedule_work(&s->work_tx);
	} else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
/* Locking: called with port lock held */
static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
			   size_t count)
{
	struct uart_port *port = &s->port;
	int i, active, room;

	room = tty_buffer_request_room(tty, count);

	if (s->active_rx == s->cookie_rx[0]) {
		active = 0;
	} else if (s->active_rx == s->cookie_rx[1]) {
		active = 1;
	} else {
		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
		return 0;
	}

	if (room < count)
		dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
			 count - room);
	if (!room)
		return room;

	for (i = 0; i < room; i++)
		tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
				     TTY_NORMAL);

	port->icount.rx += room;

	return room;
}
static void sci_dma_rx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct tty_struct *tty = port->state->port.tty;
	unsigned long flags;
	int count;

	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);

	spin_lock_irqsave(&port->lock, flags);

	count = sci_dma_rx_push(s, tty, s->buf_len_rx);

	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

	spin_unlock_irqrestore(&port->lock, flags);

	if (count)
		tty_flip_buffer_push(tty);

	schedule_work(&s->work_rx);
}
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;

	s->chan_rx = NULL;
	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
	dma_release_channel(chan);
	if (sg_dma_address(&s->sg_rx[0]))
		dma_free_coherent(port->dev, s->buf_len_rx * 2,
				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
	if (enable_pio)
		sci_start_rx(port);
}

static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;

	s->chan_tx = NULL;
	s->cookie_tx = -EINVAL;
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_tx(port);
}
static void sci_submit_rx(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_rx;
	int i;

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];
		struct dma_async_tx_descriptor *desc;

		desc = chan->device->device_prep_slave_sg(chan,
			sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);

		if (desc) {
			s->desc_rx[i] = desc;
			desc->callback = sci_dma_rx_complete;
			desc->callback_param = s;
			s->cookie_rx[i] = desc->tx_submit(desc);
		}

		if (!desc || s->cookie_rx[i] < 0) {
			if (i) {
				async_tx_ack(s->desc_rx[0]);
				s->cookie_rx[0] = -EINVAL;
			} else if (desc) {
				async_tx_ack(desc);
				s->cookie_rx[i] = -EINVAL;
			}
			dev_warn(s->port.dev,
				 "failed to re-start DMA, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}
		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
			s->cookie_rx[i], i);
	}

	s->active_rx = s->cookie_rx[0];

	dma_async_issue_pending(chan);
}
static void work_fn_rx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_rx);
	struct uart_port *port = &s->port;
	struct dma_async_tx_descriptor *desc;
	int new;

	if (s->active_rx == s->cookie_rx[0]) {
		new = 0;
	} else if (s->active_rx == s->cookie_rx[1]) {
		new = 1;
	} else {
		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
		return;
	}
	desc = s->desc_rx[new];

	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
	    DMA_SUCCESS) {
		/* Handle incomplete DMA receive */
		struct tty_struct *tty = port->state->port.tty;
		struct dma_chan *chan = s->chan_rx;
		struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
						       async_tx);
		unsigned long flags;
		int count;

		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
			sh_desc->partial, sh_desc->cookie);

		spin_lock_irqsave(&port->lock, flags);
		count = sci_dma_rx_push(s, tty, sh_desc->partial);
		spin_unlock_irqrestore(&port->lock, flags);

		if (count)
			tty_flip_buffer_push(tty);

		sci_submit_rx(s);

		return;
	}

	s->cookie_rx[new] = desc->tx_submit(desc);
	if (s->cookie_rx[new] < 0) {
		dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
		sci_rx_dma_release(s, true);
		return;
	}

	s->active_rx = s->cookie_rx[!new];

	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
		s->cookie_rx[new], new, s->active_rx);
}
static void work_fn_tx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_tx);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct scatterlist *sg = &s->sg_tx;

	/*
	 * Port xmit buffer is already mapped, and it is one page... Just adjust
	 * offsets and lengths. Since it is a circular buffer, we have to
	 * transmit till the end, and then the rest. Take the port lock to get a
	 * consistent xmit buffer state.
	 */
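	/*
	 * Example with UART_XMIT_SIZE = 4096 (one page): for tail = 4000 and
	 * head = 100, CIRC_CNT() is 196 but CIRC_CNT_TO_END() is 96, so this
	 * descriptor only covers the 96 bytes up to the end of the buffer;
	 * the wrapped remainder goes out with the next work item.
	 */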
	spin_lock_irq(&port->lock);
	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
		sg->offset;
	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
	spin_unlock_irq(&port->lock);

	BUG_ON(!sg_dma_len(sg));

	desc = chan->device->device_prep_slave_sg(chan,
			sg, s->sg_len_tx, DMA_TO_DEVICE,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);

	spin_lock_irq(&port->lock);
	s->desc_tx = desc;
	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	spin_unlock_irq(&port->lock);
	s->cookie_tx = desc->tx_submit(desc);
	if (s->cookie_tx < 0) {
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
		xmit->buf, xmit->tail, xmit->head, s->cookie_tx);

	dma_async_issue_pending(chan);
}
#endif
static void sci_start_tx(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned short ctrl;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 new, scr = sci_in(port, SCSCR);
		if (s->chan_tx)
			new = scr | 0x8000;
		else
			new = scr & ~0x8000;
		if (new != scr)
			sci_out(port, SCSCR, new);
	}

	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
	    s->cookie_tx < 0)
		schedule_work(&s->work_tx);
#endif

	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
		ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl | SCSCR_TIE);
	}
}
static void sci_stop_tx(struct uart_port *port)
{
	unsigned short ctrl;

	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
	ctrl = sci_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		ctrl &= ~0x8000;

	ctrl &= ~SCSCR_TIE;

	sci_out(port, SCSCR, ctrl);
}

static void sci_start_rx(struct uart_port *port)
{
	unsigned short ctrl;

	ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		ctrl &= ~0x4000;

	sci_out(port, SCSCR, ctrl);
}

static void sci_stop_rx(struct uart_port *port)
{
	unsigned short ctrl;

	ctrl = sci_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		ctrl &= ~0x4000;

	ctrl &= ~port_rx_irq_mask(port);

	sci_out(port, SCSCR, ctrl);
}

static void sci_enable_ms(struct uart_port *port)
{
	/* Nothing here yet .. */
}

static void sci_break_ctl(struct uart_port *port, int break_state)
{
	/* Nothing here yet .. */
}
#ifdef CONFIG_SERIAL_SH_SCI_DMA
static bool filter(struct dma_chan *chan, void *slave)
{
	struct sh_dmae_slave *param = slave;

	dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
		param->slave_id);

	if (param->dma_dev == chan->device->dev) {
		chan->private = param;
		return true;
	} else {
		return false;
	}
}

static void rx_timer_fn(unsigned long arg)
{
	struct sci_port *s = (struct sci_port *)arg;
	struct uart_port *port = &s->port;
	u16 scr = sci_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		scr &= ~0x4000;
		enable_irq(s->cfg->irqs[1]);
	}
	sci_out(port, SCSCR, scr | SCSCR_RIE);
	dev_dbg(port->dev, "DMA Rx timed out\n");
	schedule_work(&s->work_rx);
}
static void sci_request_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct sh_dmae_slave *param;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int nent;

	dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
		port->line, s->cfg->dma_dev);

	if (!s->cfg->dma_dev)
		return;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	param = &s->param_tx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
	param->slave_id = s->cfg->dma_slave_tx;
	param->dma_dev = s->cfg->dma_dev;

	s->cookie_tx = -EINVAL;
	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
	if (chan) {
		s->chan_tx = chan;
		sg_init_table(&s->sg_tx, 1);
		/* UART circular tx buffer is an aligned page. */
		BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
			    UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
		if (!nent)
			sci_tx_dma_release(s, false);
		else
			dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
				sg_dma_len(&s->sg_tx),
				port->state->xmit.buf, sg_dma_address(&s->sg_tx));

		s->sg_len_tx = nent;

		INIT_WORK(&s->work_tx, work_fn_tx);
	}

	param = &s->param_rx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
	param->slave_id = s->cfg->dma_slave_rx;
	param->dma_dev = s->cfg->dma_dev;

	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
	if (chan) {
		dma_addr_t dma[2];
		void *buf[2];
		int i;

		s->chan_rx = chan;

		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
					    &dma[0], GFP_KERNEL);

		if (!buf[0]) {
			dev_warn(port->dev,
				 "failed to allocate dma buffer, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}

		buf[1] = buf[0] + s->buf_len_rx;
		dma[1] = dma[0] + s->buf_len_rx;

		for (i = 0; i < 2; i++) {
			struct scatterlist *sg = &s->sg_rx[i];

			sg_init_table(sg, 1);
			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
				    (int)buf[i] & ~PAGE_MASK);
			sg_dma_address(sg) = dma[i];
		}

		INIT_WORK(&s->work_rx, work_fn_rx);
		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
	}
}
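
/*
 * The Rx DMA path set up above is double-buffered: buf_len_rx is twice the
 * FIFO size (e.g. 128 bytes for a 64-byte SCIF FIFO), a single coherent
 * allocation of 2 * buf_len_rx is split into two halves, and sci_submit_rx()
 * keeps one descriptor per half cycling on the channel, with rx_timer_fn()
 * as the fallback when a buffer never fills.
 */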
static void sci_free_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	if (!s->cfg->dma_dev)
		return;

	if (s->chan_tx)
		sci_tx_dma_release(s, false);
	if (s->chan_rx)
		sci_rx_dma_release(s, false);
}
#else
static inline void sci_request_dma(struct uart_port *port)
{
}

static inline void sci_free_dma(struct uart_port *port)
{
}
#endif
static int sci_startup(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	int ret;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	if (s->enable)
		s->enable(port);

	ret = sci_request_irq(s);
	if (unlikely(ret < 0))
		return ret;

	sci_request_dma(port);

	sci_start_tx(port);
	sci_start_rx(port);

	return 0;
}

static void sci_shutdown(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	sci_stop_rx(port);
	sci_stop_tx(port);

	sci_free_dma(port);
	sci_free_irq(s);

	if (s->disable)
		s->disable(port);
}
static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
				   unsigned long freq)
{
	switch (algo_id) {
	case SCBRR_ALGO_1:
		return ((freq + 16 * bps) / (16 * bps) - 1);
	case SCBRR_ALGO_2:
		return ((freq + 16 * bps) / (32 * bps) - 1);
	case SCBRR_ALGO_3:
		return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
	case SCBRR_ALGO_4:
		return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
	case SCBRR_ALGO_5:
		return (((freq * 1000 / 32) / bps) - 1);
	}

	/* Warn, but use a safe default */
	WARN_ON(1);

	return ((freq + 16 * bps) / (32 * bps) - 1);
}
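
/*
 * Worked example for the SCBRR_ALGO_2 formula: assuming a 48 MHz peripheral
 * clock and 115200 bps, (48000000 + 16 * 115200) / (32 * 115200) - 1 = 12,
 * and the resulting rate of 48000000 / (32 * (12 + 1)) is roughly 115385 bps,
 * i.e. about 0.16% fast. The 16 * bps term just rounds to the nearest divisor.
 */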
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
			    struct ktermios *old)
{
	struct sci_port *s = to_sci_port(port);
	unsigned int status, baud, smr_val, max_baud;
	int t = -1;
	u16 scfcr = 0;

	/*
	 * earlyprintk comes here early on with port->uartclk set to zero.
	 * the clock framework is not up and running at this point so here
	 * we assume that 115200 is the maximum baud rate. please note that
	 * the baud rate is not programmed during earlyprintk - it is assumed
	 * that the previous boot loader has enabled required clocks and
	 * setup the baud rate generator hardware for us already.
	 */
	max_baud = port->uartclk ? port->uartclk / 16 : 115200;

	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
	if (likely(baud && port->uartclk))
		t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);

	if (s->enable)
		s->enable(port);

	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TEND(port)));

	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */

	if (port->type != PORT_SCI)
		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);

	smr_val = sci_in(port, SCSMR) & 3;

	if ((termios->c_cflag & CSIZE) == CS7)
		smr_val |= 0x40;
	if (termios->c_cflag & PARENB)
		smr_val |= 0x20;
	if (termios->c_cflag & PARODD)
		smr_val |= 0x30;
	if (termios->c_cflag & CSTOPB)
		smr_val |= 0x08;

	uart_update_timeout(port, termios->c_cflag, baud);

	sci_out(port, SCSMR, smr_val);

	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
		s->cfg->scscr);

	if (t > 0) {
		if (t >= 256) {
			sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
			t >>= 2;
		} else
			sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);

		sci_out(port, SCBRR, t);
		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
	}

	sci_init_pins(port, termios->c_cflag);
	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));

	sci_out(port, SCSCR, s->cfg->scscr);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	/*
	 * Calculate delay for 1.5 DMA buffers: see
	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
	 * sizes), but it has been found out experimentally, that this is not
	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
	 * as a minimum seem to work perfectly.
	 */
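	/*
	 * Restating that arithmetic with HZ = 250, a 64-byte FIFO and
	 * buf_len_rx = 128: (6 - 5) * 128 * 3 / 64 / 2 = 3 jiffies = 12ms,
	 * which the clamp below then raises to the 20ms minimum.
	 */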
	if (s->chan_rx) {
		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
			port->fifosize / 2;
		dev_dbg(port->dev,
			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
			s->rx_timeout * 1000 / HZ, port->timeout);
		if (s->rx_timeout < msecs_to_jiffies(20))
			s->rx_timeout = msecs_to_jiffies(20);
	}
#endif

	if ((termios->c_cflag & CREAD) != 0)
		sci_start_rx(port);

	if (s->disable)
		s->disable(port);
}
static const char *sci_type(struct uart_port *port)
{
	switch (port->type) {
	case PORT_IRDA:
		return "irda";
	case PORT_SCI:
		return "sci";
	case PORT_SCIF:
		return "scif";
	case PORT_SCIFA:
		return "scifa";
	case PORT_SCIFB:
		return "scifb";
	}

	return NULL;
}
static inline unsigned long sci_port_size(struct uart_port *port)
{
	/*
	 * Pick an arbitrary size that encapsulates all of the base
	 * registers by default. This can be optimized later, or derived
	 * from platform resource data at such a time that ports begin to
	 * behave more erratically.
	 */
	return 64;
}

static int sci_remap_port(struct uart_port *port)
{
	unsigned long size = sci_port_size(port);

	/*
	 * Nothing to do if there's already an established membase.
	 */
	if (port->membase)
		return 0;

	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap_nocache(port->mapbase, size);
		if (unlikely(!port->membase)) {
			dev_err(port->dev, "can't remap port#%d\n", port->line);
			return -ENXIO;
		}
	} else {
		/*
		 * For the simple (and majority of) cases where we don't
		 * need to do any remapping, just cast the cookie
		 * directly.
		 */
		port->membase = (void __iomem *)port->mapbase;
	}

	return 0;
}
static void sci_release_port(struct uart_port *port)
{
	if (port->flags & UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}

	release_mem_region(port->mapbase, sci_port_size(port));
}

static int sci_request_port(struct uart_port *port)
{
	unsigned long size = sci_port_size(port);
	struct resource *res;
	int ret;

	res = request_mem_region(port->mapbase, size, dev_name(port->dev));
	if (unlikely(res == NULL))
		return -EBUSY;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0)) {
		release_resource(res);
		return ret;
	}

	return 0;
}
static void sci_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		struct sci_port *sport = to_sci_port(port);

		port->type = sport->cfg->type;
		sci_request_port(port);
	}
}

static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct sci_port *s = to_sci_port(port);

	if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
		return -EINVAL;
	if (ser->baud_base < 2400)
		/* No paper tape reader for Mitch.. */
		return -EINVAL;

	return 0;
}
static struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.enable_ms	= sci_enable_ms,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.set_termios	= sci_set_termios,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
static int __devinit sci_init_single(struct platform_device *dev,
				     struct sci_port *sci_port,
				     unsigned int index,
				     struct plat_sci_port *p)
{
	struct uart_port *port = &sci_port->port;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;

	switch (p->type) {
	case PORT_SCIFB:
		port->fifosize = 256;
		break;
	case PORT_SCIFA:
		port->fifosize = 64;
		break;
	case PORT_SCIF:
		port->fifosize = 16;
		break;
	default:
		port->fifosize = 1;
		break;
	}

	if (dev) {
		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
		if (IS_ERR(sci_port->iclk)) {
			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
			if (IS_ERR(sci_port->iclk)) {
				dev_err(&dev->dev, "can't get iclk\n");
				return PTR_ERR(sci_port->iclk);
			}
		}

		/*
		 * The function clock is optional, ignore it if we can't
		 * find it.
		 */
		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
		if (IS_ERR(sci_port->fclk))
			sci_port->fclk = NULL;

		sci_port->enable = sci_clk_enable;
		sci_port->disable = sci_clk_disable;
		port->dev = &dev->dev;

		pm_runtime_enable(&dev->dev);
	}

	sci_port->break_timer.data = (unsigned long)sci_port;
	sci_port->break_timer.function = sci_break_timer;
	init_timer(&sci_port->break_timer);

	sci_port->cfg	= p;

	port->mapbase	= p->mapbase;
	port->type	= p->type;
	port->flags	= p->flags;

	/*
	 * The UART port needs an IRQ value, so we peg this to the TX IRQ
	 * for the multi-IRQ ports, which is where we are primarily
	 * concerned with the shutdown path synchronization.
	 *
	 * For the muxed case there's nothing more to do.
	 */
	port->irq = p->irqs[SCIx_RXI_IRQ];

	if (p->dma_dev)
		dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
			p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);

	return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}

/*
 *	Print a string to the serial port trying not to disturb
 *	any possible real use of the port...
 */
static void serial_console_write(struct console *co, const char *s,
				 unsigned count)
{
	struct sci_port *sci_port = &sci_ports[co->index];
	struct uart_port *port = &sci_port->port;
	unsigned short bits;

	if (sci_port->enable)
		sci_port->enable(port);

	uart_console_write(port, s, count, serial_console_putchar);

	/* wait until fifo is empty and last bit has been transmitted */
	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
	while ((sci_in(port, SCxSR) & bits) != bits)
		cpu_relax();

	if (sci_port->disable)
		sci_port->disable(port);
}
static int __devinit serial_console_setup(struct console *co, char *options)
{
	struct sci_port *sci_port;
	struct uart_port *port;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Refuse to handle any bogus ports.
	 */
	if (co->index < 0 || co->index >= SCI_NPORTS)
		return -ENODEV;

	sci_port = &sci_ports[co->index];
	port = &sci_port->port;

	/*
	 * Refuse to handle uninitialized ports.
	 */
	if (!port->ops)
		return -ENODEV;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0))
		return ret;

	if (sci_port->enable)
		sci_port->enable(port);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	ret = uart_set_options(port, co, baud, parity, bits, flow);
#if defined(__H8300H__) || defined(__H8300S__)
	/* disable rx interrupt */
	if (ret == 0)
		sci_stop_rx(port);
#endif
	/* TODO: disable clock */
	return ret;
}
static struct console serial_console = {
	.name		= "ttySC",
	.device		= uart_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sci_uart_driver,
};

static struct console early_serial_console = {
	.name		= "early_ttySC",
	.write		= serial_console_write,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

static char early_serial_buf[32];

static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
{
	struct plat_sci_port *cfg = pdev->dev.platform_data;

	if (early_serial_console.data)
		return -EEXIST;

	early_serial_console.index = pdev->id;

	sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);

	serial_console_setup(&early_serial_console, early_serial_buf);

	if (!strstr(early_serial_buf, "keep"))
		early_serial_console.flags |= CON_BOOT;

	register_console(&early_serial_console);
	return 0;
}

#define SCI_CONSOLE	(&serial_console)

#else

static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
{
	return -EINVAL;
}

#define SCI_CONSOLE	NULL

#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
static char banner[] __initdata =
	KERN_INFO "SuperH SCI(F) driver initialized\n";

static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};
static int sci_remove(struct platform_device *dev)
{
	struct sci_port *port = platform_get_drvdata(dev);

	cpufreq_unregister_notifier(&port->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);

	uart_remove_one_port(&sci_uart_driver, &port->port);

	clk_put(port->iclk);
	clk_put(port->fclk);

	pm_runtime_disable(&dev->dev);
	return 0;
}
static int __devinit sci_probe_single(struct platform_device *dev,
				      unsigned int index,
				      struct plat_sci_port *p,
				      struct sci_port *sciport)
{
	int ret;

	/* Sanity check */
	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port "
			   "%d when only %d are available.\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping "
			   "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return 0;
	}

	ret = sci_init_single(dev, sciport, index, p);
	if (ret)
		return ret;

	return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
static int __devinit sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p = dev->dev.platform_data;
	struct sci_port *sp = &sci_ports[dev->id];
	int ret;

	/*
	 * If we've come here via earlyprintk initialization, head off to
	 * the special early probe. We don't have sufficient device state
	 * to make it beyond this yet.
	 */
	if (is_early_platform_device(dev))
		return sci_probe_earlyprintk(dev);

	platform_set_drvdata(dev, sp);

	ret = sci_probe_single(dev, dev->id, p, sp);
	if (ret)
		goto err_unreg;

	sp->freq_transition.notifier_call = sci_notifier;

	ret = cpufreq_register_notifier(&sp->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (unlikely(ret < 0))
		goto err_unreg;

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	return 0;

err_unreg:
	sci_remove(dev);
	return ret;
}
static int sci_suspend(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_suspend_port(&sci_uart_driver, &sport->port);

	return 0;
}

static int sci_resume(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_resume_port(&sci_uart_driver, &sport->port);

	return 0;
}

static const struct dev_pm_ops sci_dev_pm_ops = {
	.suspend	= sci_suspend,
	.resume		= sci_resume,
};

static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.owner	= THIS_MODULE,
		.pm	= &sci_dev_pm_ops,
	},
};
static int __init sci_init(void)
{
	int ret;

	printk(banner);

	ret = uart_register_driver(&sci_uart_driver);
	if (likely(ret == 0)) {
		ret = platform_driver_register(&sci_driver);
		if (unlikely(ret))
			uart_unregister_driver(&sci_uart_driver);
	}

	return ret;
}

static void __exit sci_exit(void)
{
	platform_driver_unregister(&sci_driver);
	uart_unregister_driver(&sci_uart_driver);
}

#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
early_platform_init_buffer("earlyprintk", &sci_driver,
			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
module_init(sci_init);
module_exit(sci_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");