2 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
4 * Copyright (C) 2002 - 2011 Paul Mundt
5 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
7 * based off of the old drivers/char/sh-sci.c by:
9 * Copyright (C) 1999, 2000 Niibe Yutaka
10 * Copyright (C) 2000 Sugioka Toshinobu
11 * Modified to support multiple serial ports. Stuart Menefy (May 2000).
12 * Modified to support SecureEdge. David McCullough (2002)
13 * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
14 * Removed SH7300 support (Jul 2007).
16 * This file is subject to the terms and conditions of the GNU General Public
17 * License. See the file "COPYING" in the main directory of this archive
20 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
26 #include <linux/module.h>
27 #include <linux/errno.h>
28 #include <linux/timer.h>
29 #include <linux/interrupt.h>
30 #include <linux/tty.h>
31 #include <linux/tty_flip.h>
32 #include <linux/serial.h>
33 #include <linux/major.h>
34 #include <linux/string.h>
35 #include <linux/sysrq.h>
36 #include <linux/ioport.h>
38 #include <linux/init.h>
39 #include <linux/delay.h>
40 #include <linux/console.h>
41 #include <linux/platform_device.h>
42 #include <linux/serial_sci.h>
43 #include <linux/notifier.h>
44 #include <linux/pm_runtime.h>
45 #include <linux/cpufreq.h>
46 #include <linux/clk.h>
47 #include <linux/ctype.h>
48 #include <linux/err.h>
49 #include <linux/dmaengine.h>
50 #include <linux/dma-mapping.h>
51 #include <linux/scatterlist.h>
52 #include <linux/slab.h>
55 #include <asm/sh_bios.h>
61 struct uart_port port
;
63 /* Platform configuration */
64 struct plat_sci_port
*cfg
;
67 struct timer_list break_timer
;
75 char *irqstr
[SCIx_NR_IRQS
];
77 struct dma_chan
*chan_tx
;
78 struct dma_chan
*chan_rx
;
80 #ifdef CONFIG_SERIAL_SH_SCI_DMA
81 struct dma_async_tx_descriptor
*desc_tx
;
82 struct dma_async_tx_descriptor
*desc_rx
[2];
83 dma_cookie_t cookie_tx
;
84 dma_cookie_t cookie_rx
[2];
85 dma_cookie_t active_rx
;
86 struct scatterlist sg_tx
;
87 unsigned int sg_len_tx
;
88 struct scatterlist sg_rx
[2];
90 struct sh_dmae_slave param_tx
;
91 struct sh_dmae_slave param_rx
;
92 struct work_struct work_tx
;
93 struct work_struct work_rx
;
94 struct timer_list rx_timer
;
95 unsigned int rx_timeout
;
98 struct notifier_block freq_transition
;
100 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
101 unsigned short saved_smr
;
102 unsigned short saved_fcr
;
103 unsigned char saved_brr
;
107 /* Function prototypes */
108 static void sci_start_tx(struct uart_port
*port
);
109 static void sci_stop_tx(struct uart_port
*port
);
110 static void sci_start_rx(struct uart_port
*port
);
112 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
114 static struct sci_port sci_ports
[SCI_NPORTS
];
115 static struct uart_driver sci_uart_driver
;
117 static inline struct sci_port
*
118 to_sci_port(struct uart_port
*uart
)
120 return container_of(uart
, struct sci_port
, port
);
123 struct plat_sci_reg
{
127 /* Helper for invalidating specific entries of an inherited map. */
128 #define sci_reg_invalid { .offset = 0, .size = 0 }
130 static struct plat_sci_reg sci_regmap
[SCIx_NR_REGTYPES
][SCIx_NR_REGS
] = {
131 [SCIx_PROBE_REGTYPE
] = {
132 [0 ... SCIx_NR_REGS
- 1] = sci_reg_invalid
,
136 * Common SCI definitions, dependent on the port's regshift
139 [SCIx_SCI_REGTYPE
] = {
140 [SCSMR
] = { 0x00, 8 },
141 [SCBRR
] = { 0x01, 8 },
142 [SCSCR
] = { 0x02, 8 },
143 [SCxTDR
] = { 0x03, 8 },
144 [SCxSR
] = { 0x04, 8 },
145 [SCxRDR
] = { 0x05, 8 },
146 [SCFCR
] = sci_reg_invalid
,
147 [SCFDR
] = sci_reg_invalid
,
148 [SCTFDR
] = sci_reg_invalid
,
149 [SCRFDR
] = sci_reg_invalid
,
150 [SCSPTR
] = sci_reg_invalid
,
151 [SCLSR
] = sci_reg_invalid
,
155 * Common definitions for legacy IrDA ports, dependent on
158 [SCIx_IRDA_REGTYPE
] = {
159 [SCSMR
] = { 0x00, 8 },
160 [SCBRR
] = { 0x01, 8 },
161 [SCSCR
] = { 0x02, 8 },
162 [SCxTDR
] = { 0x03, 8 },
163 [SCxSR
] = { 0x04, 8 },
164 [SCxRDR
] = { 0x05, 8 },
165 [SCFCR
] = { 0x06, 8 },
166 [SCFDR
] = { 0x07, 16 },
167 [SCTFDR
] = sci_reg_invalid
,
168 [SCRFDR
] = sci_reg_invalid
,
169 [SCSPTR
] = sci_reg_invalid
,
170 [SCLSR
] = sci_reg_invalid
,
174 * Common SCIFA definitions.
176 [SCIx_SCIFA_REGTYPE
] = {
177 [SCSMR
] = { 0x00, 16 },
178 [SCBRR
] = { 0x04, 8 },
179 [SCSCR
] = { 0x08, 16 },
180 [SCxTDR
] = { 0x20, 8 },
181 [SCxSR
] = { 0x14, 16 },
182 [SCxRDR
] = { 0x24, 8 },
183 [SCFCR
] = { 0x18, 16 },
184 [SCFDR
] = { 0x1c, 16 },
185 [SCTFDR
] = sci_reg_invalid
,
186 [SCRFDR
] = sci_reg_invalid
,
187 [SCSPTR
] = sci_reg_invalid
,
188 [SCLSR
] = sci_reg_invalid
,
192 * Common SCIFB definitions.
194 [SCIx_SCIFB_REGTYPE
] = {
195 [SCSMR
] = { 0x00, 16 },
196 [SCBRR
] = { 0x04, 8 },
197 [SCSCR
] = { 0x08, 16 },
198 [SCxTDR
] = { 0x40, 8 },
199 [SCxSR
] = { 0x14, 16 },
200 [SCxRDR
] = { 0x60, 8 },
201 [SCFCR
] = { 0x18, 16 },
202 [SCFDR
] = { 0x1c, 16 },
203 [SCTFDR
] = sci_reg_invalid
,
204 [SCRFDR
] = sci_reg_invalid
,
205 [SCSPTR
] = sci_reg_invalid
,
206 [SCLSR
] = sci_reg_invalid
,
210 * Common SH-3 SCIF definitions.
212 [SCIx_SH3_SCIF_REGTYPE
] = {
213 [SCSMR
] = { 0x00, 8 },
214 [SCBRR
] = { 0x02, 8 },
215 [SCSCR
] = { 0x04, 8 },
216 [SCxTDR
] = { 0x06, 8 },
217 [SCxSR
] = { 0x08, 16 },
218 [SCxRDR
] = { 0x0a, 8 },
219 [SCFCR
] = { 0x0c, 8 },
220 [SCFDR
] = { 0x0e, 16 },
221 [SCTFDR
] = sci_reg_invalid
,
222 [SCRFDR
] = sci_reg_invalid
,
223 [SCSPTR
] = sci_reg_invalid
,
224 [SCLSR
] = sci_reg_invalid
,
228 * Common SH-4(A) SCIF(B) definitions.
230 [SCIx_SH4_SCIF_REGTYPE
] = {
231 [SCSMR
] = { 0x00, 16 },
232 [SCBRR
] = { 0x04, 8 },
233 [SCSCR
] = { 0x08, 16 },
234 [SCxTDR
] = { 0x0c, 8 },
235 [SCxSR
] = { 0x10, 16 },
236 [SCxRDR
] = { 0x14, 8 },
237 [SCFCR
] = { 0x18, 16 },
238 [SCFDR
] = { 0x1c, 16 },
239 [SCTFDR
] = sci_reg_invalid
,
240 [SCRFDR
] = sci_reg_invalid
,
241 [SCSPTR
] = { 0x20, 16 },
242 [SCLSR
] = { 0x24, 16 },
246 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
249 [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE
] = {
250 [SCSMR
] = { 0x00, 16 },
251 [SCBRR
] = { 0x04, 8 },
252 [SCSCR
] = { 0x08, 16 },
253 [SCxTDR
] = { 0x0c, 8 },
254 [SCxSR
] = { 0x10, 16 },
255 [SCxRDR
] = { 0x14, 8 },
256 [SCFCR
] = { 0x18, 16 },
257 [SCFDR
] = { 0x1c, 16 },
258 [SCTFDR
] = sci_reg_invalid
,
259 [SCRFDR
] = sci_reg_invalid
,
260 [SCSPTR
] = sci_reg_invalid
,
261 [SCLSR
] = { 0x24, 16 },
265 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
268 [SCIx_SH4_SCIF_FIFODATA_REGTYPE
] = {
269 [SCSMR
] = { 0x00, 16 },
270 [SCBRR
] = { 0x04, 8 },
271 [SCSCR
] = { 0x08, 16 },
272 [SCxTDR
] = { 0x0c, 8 },
273 [SCxSR
] = { 0x10, 16 },
274 [SCxRDR
] = { 0x14, 8 },
275 [SCFCR
] = { 0x18, 16 },
276 [SCFDR
] = { 0x1c, 16 },
277 [SCTFDR
] = { 0x1c, 16 }, /* aliased to SCFDR */
278 [SCRFDR
] = { 0x20, 16 },
279 [SCSPTR
] = { 0x24, 16 },
280 [SCLSR
] = { 0x28, 16 },
284 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
287 [SCIx_SH7705_SCIF_REGTYPE
] = {
288 [SCSMR
] = { 0x00, 16 },
289 [SCBRR
] = { 0x04, 8 },
290 [SCSCR
] = { 0x08, 16 },
291 [SCxTDR
] = { 0x20, 8 },
292 [SCxSR
] = { 0x14, 16 },
293 [SCxRDR
] = { 0x24, 8 },
294 [SCFCR
] = { 0x18, 16 },
295 [SCFDR
] = { 0x1c, 16 },
296 [SCTFDR
] = sci_reg_invalid
,
297 [SCRFDR
] = sci_reg_invalid
,
298 [SCSPTR
] = sci_reg_invalid
,
299 [SCLSR
] = sci_reg_invalid
,
303 #define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset)
306 * The "offset" here is rather misleading, in that it refers to an enum
307 * value relative to the port mapping rather than the fixed offset
308 * itself, which needs to be manually retrieved from the platform's
309 * register map for the given port.
311 static unsigned int sci_serial_in(struct uart_port
*p
, int offset
)
313 struct plat_sci_reg
*reg
= sci_getreg(p
, offset
);
316 return ioread8(p
->membase
+ (reg
->offset
<< p
->regshift
));
317 else if (reg
->size
== 16)
318 return ioread16(p
->membase
+ (reg
->offset
<< p
->regshift
));
320 WARN(1, "Invalid register access\n");
325 static void sci_serial_out(struct uart_port
*p
, int offset
, int value
)
327 struct plat_sci_reg
*reg
= sci_getreg(p
, offset
);
330 iowrite8(value
, p
->membase
+ (reg
->offset
<< p
->regshift
));
331 else if (reg
->size
== 16)
332 iowrite16(value
, p
->membase
+ (reg
->offset
<< p
->regshift
));
334 WARN(1, "Invalid register access\n");
337 #define sci_in(up, offset) (up->serial_in(up, offset))
338 #define sci_out(up, offset, value) (up->serial_out(up, offset, value))
340 static int sci_probe_regmap(struct plat_sci_port
*cfg
)
344 cfg
->regtype
= SCIx_SCI_REGTYPE
;
347 cfg
->regtype
= SCIx_IRDA_REGTYPE
;
350 cfg
->regtype
= SCIx_SCIFA_REGTYPE
;
353 cfg
->regtype
= SCIx_SCIFB_REGTYPE
;
357 * The SH-4 is a bit of a misnomer here, although that's
358 * where this particular port layout originated. This
359 * configuration (or some slight variation thereof)
360 * remains the dominant model for all SCIFs.
362 cfg
->regtype
= SCIx_SH4_SCIF_REGTYPE
;
365 printk(KERN_ERR
"Can't probe register map for given port\n");
372 static void sci_port_enable(struct sci_port
*sci_port
)
374 if (!sci_port
->port
.dev
)
377 pm_runtime_get_sync(sci_port
->port
.dev
);
379 clk_enable(sci_port
->iclk
);
380 sci_port
->port
.uartclk
= clk_get_rate(sci_port
->iclk
);
381 clk_enable(sci_port
->fclk
);
384 static void sci_port_disable(struct sci_port
*sci_port
)
386 if (!sci_port
->port
.dev
)
389 clk_disable(sci_port
->fclk
);
390 clk_disable(sci_port
->iclk
);
392 pm_runtime_put_sync(sci_port
->port
.dev
);
395 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
397 #ifdef CONFIG_CONSOLE_POLL
398 static int sci_poll_get_char(struct uart_port
*port
)
400 unsigned short status
;
404 status
= sci_in(port
, SCxSR
);
405 if (status
& SCxSR_ERRORS(port
)) {
406 sci_out(port
, SCxSR
, SCxSR_ERROR_CLEAR(port
));
412 if (!(status
& SCxSR_RDxF(port
)))
415 c
= sci_in(port
, SCxRDR
);
419 sci_out(port
, SCxSR
, SCxSR_RDxF_CLEAR(port
));
425 static void sci_poll_put_char(struct uart_port
*port
, unsigned char c
)
427 unsigned short status
;
430 status
= sci_in(port
, SCxSR
);
431 } while (!(status
& SCxSR_TDxE(port
)));
433 sci_out(port
, SCxTDR
, c
);
434 sci_out(port
, SCxSR
, SCxSR_TDxE_CLEAR(port
) & ~SCxSR_TEND(port
));
436 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
438 static void sci_init_pins(struct uart_port
*port
, unsigned int cflag
)
440 struct sci_port
*s
= to_sci_port(port
);
441 struct plat_sci_reg
*reg
= sci_regmap
[s
->cfg
->regtype
] + SCSPTR
;
444 * Use port-specific handler if provided.
446 if (s
->cfg
->ops
&& s
->cfg
->ops
->init_pins
) {
447 s
->cfg
->ops
->init_pins(port
, cflag
);
452 * For the generic path SCSPTR is necessary. Bail out if that's
458 if (!(cflag
& CRTSCTS
))
459 sci_out(port
, SCSPTR
, 0x0080); /* Set RTS = 1 */
462 static int sci_txfill(struct uart_port
*port
)
464 struct plat_sci_reg
*reg
;
466 reg
= sci_getreg(port
, SCTFDR
);
468 return sci_in(port
, SCTFDR
) & 0xff;
470 reg
= sci_getreg(port
, SCFDR
);
472 return sci_in(port
, SCFDR
) >> 8;
474 return !(sci_in(port
, SCxSR
) & SCI_TDRE
);
477 static int sci_txroom(struct uart_port
*port
)
479 return port
->fifosize
- sci_txfill(port
);
482 static int sci_rxfill(struct uart_port
*port
)
484 struct plat_sci_reg
*reg
;
486 reg
= sci_getreg(port
, SCRFDR
);
488 return sci_in(port
, SCRFDR
) & 0xff;
490 reg
= sci_getreg(port
, SCFDR
);
492 return sci_in(port
, SCFDR
) & ((port
->fifosize
<< 1) - 1);
494 return (sci_in(port
, SCxSR
) & SCxSR_RDxF(port
)) != 0;
498 * SCI helper for checking the state of the muxed port/RXD pins.
500 static inline int sci_rxd_in(struct uart_port
*port
)
502 struct sci_port
*s
= to_sci_port(port
);
504 if (s
->cfg
->port_reg
<= 0)
507 return !!__raw_readb(s
->cfg
->port_reg
);
510 /* ********************************************************************** *
511 * the interrupt related routines *
512 * ********************************************************************** */
514 static void sci_transmit_chars(struct uart_port
*port
)
516 struct circ_buf
*xmit
= &port
->state
->xmit
;
517 unsigned int stopped
= uart_tx_stopped(port
);
518 unsigned short status
;
522 status
= sci_in(port
, SCxSR
);
523 if (!(status
& SCxSR_TDxE(port
))) {
524 ctrl
= sci_in(port
, SCSCR
);
525 if (uart_circ_empty(xmit
))
529 sci_out(port
, SCSCR
, ctrl
);
533 count
= sci_txroom(port
);
541 } else if (!uart_circ_empty(xmit
) && !stopped
) {
542 c
= xmit
->buf
[xmit
->tail
];
543 xmit
->tail
= (xmit
->tail
+ 1) & (UART_XMIT_SIZE
- 1);
548 sci_out(port
, SCxTDR
, c
);
551 } while (--count
> 0);
553 sci_out(port
, SCxSR
, SCxSR_TDxE_CLEAR(port
));
555 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
556 uart_write_wakeup(port
);
557 if (uart_circ_empty(xmit
)) {
560 ctrl
= sci_in(port
, SCSCR
);
562 if (port
->type
!= PORT_SCI
) {
563 sci_in(port
, SCxSR
); /* Dummy read */
564 sci_out(port
, SCxSR
, SCxSR_TDxE_CLEAR(port
));
568 sci_out(port
, SCSCR
, ctrl
);
572 /* On SH3, SCIF may read end-of-break as a space->mark char */
573 #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })
575 static void sci_receive_chars(struct uart_port
*port
)
577 struct sci_port
*sci_port
= to_sci_port(port
);
578 struct tty_struct
*tty
= port
->state
->port
.tty
;
579 int i
, count
, copied
= 0;
580 unsigned short status
;
583 status
= sci_in(port
, SCxSR
);
584 if (!(status
& SCxSR_RDxF(port
)))
588 /* Don't copy more bytes than there is room for in the buffer */
589 count
= tty_buffer_request_room(tty
, sci_rxfill(port
));
591 /* If for any reason we can't copy more data, we're done! */
595 if (port
->type
== PORT_SCI
) {
596 char c
= sci_in(port
, SCxRDR
);
597 if (uart_handle_sysrq_char(port
, c
) ||
598 sci_port
->break_flag
)
601 tty_insert_flip_char(tty
, c
, TTY_NORMAL
);
603 for (i
= 0; i
< count
; i
++) {
604 char c
= sci_in(port
, SCxRDR
);
605 status
= sci_in(port
, SCxSR
);
606 #if defined(CONFIG_CPU_SH3)
607 /* Skip "chars" during break */
608 if (sci_port
->break_flag
) {
610 (status
& SCxSR_FER(port
))) {
615 /* Nonzero => end-of-break */
616 dev_dbg(port
->dev
, "debounce<%02x>\n", c
);
617 sci_port
->break_flag
= 0;
624 #endif /* CONFIG_CPU_SH3 */
625 if (uart_handle_sysrq_char(port
, c
)) {
630 /* Store data and status */
631 if (status
& SCxSR_FER(port
)) {
633 dev_notice(port
->dev
, "frame error\n");
634 } else if (status
& SCxSR_PER(port
)) {
636 dev_notice(port
->dev
, "parity error\n");
640 tty_insert_flip_char(tty
, c
, flag
);
644 sci_in(port
, SCxSR
); /* dummy read */
645 sci_out(port
, SCxSR
, SCxSR_RDxF_CLEAR(port
));
648 port
->icount
.rx
+= count
;
652 /* Tell the rest of the system the news. New characters! */
653 tty_flip_buffer_push(tty
);
655 sci_in(port
, SCxSR
); /* dummy read */
656 sci_out(port
, SCxSR
, SCxSR_RDxF_CLEAR(port
));
660 #define SCI_BREAK_JIFFIES (HZ/20)
663 * The sci generates interrupts during the break,
664 * 1 per millisecond or so during the break period, for 9600 baud.
665 * So dont bother disabling interrupts.
666 * But dont want more than 1 break event.
667 * Use a kernel timer to periodically poll the rx line until
668 * the break is finished.
670 static inline void sci_schedule_break_timer(struct sci_port
*port
)
672 mod_timer(&port
->break_timer
, jiffies
+ SCI_BREAK_JIFFIES
);
675 /* Ensure that two consecutive samples find the break over. */
676 static void sci_break_timer(unsigned long data
)
678 struct sci_port
*port
= (struct sci_port
*)data
;
680 sci_port_enable(port
);
682 if (sci_rxd_in(&port
->port
) == 0) {
683 port
->break_flag
= 1;
684 sci_schedule_break_timer(port
);
685 } else if (port
->break_flag
== 1) {
687 port
->break_flag
= 2;
688 sci_schedule_break_timer(port
);
690 port
->break_flag
= 0;
692 sci_port_disable(port
);
695 static int sci_handle_errors(struct uart_port
*port
)
698 unsigned short status
= sci_in(port
, SCxSR
);
699 struct tty_struct
*tty
= port
->state
->port
.tty
;
700 struct sci_port
*s
= to_sci_port(port
);
703 * Handle overruns, if supported.
705 if (s
->cfg
->overrun_bit
!= SCIx_NOT_SUPPORTED
) {
706 if (status
& (1 << s
->cfg
->overrun_bit
)) {
708 if (tty_insert_flip_char(tty
, 0, TTY_OVERRUN
))
711 dev_notice(port
->dev
, "overrun error");
715 if (status
& SCxSR_FER(port
)) {
716 if (sci_rxd_in(port
) == 0) {
717 /* Notify of BREAK */
718 struct sci_port
*sci_port
= to_sci_port(port
);
720 if (!sci_port
->break_flag
) {
721 sci_port
->break_flag
= 1;
722 sci_schedule_break_timer(sci_port
);
724 /* Do sysrq handling. */
725 if (uart_handle_break(port
))
728 dev_dbg(port
->dev
, "BREAK detected\n");
730 if (tty_insert_flip_char(tty
, 0, TTY_BREAK
))
736 if (tty_insert_flip_char(tty
, 0, TTY_FRAME
))
739 dev_notice(port
->dev
, "frame error\n");
743 if (status
& SCxSR_PER(port
)) {
745 if (tty_insert_flip_char(tty
, 0, TTY_PARITY
))
748 dev_notice(port
->dev
, "parity error");
752 tty_flip_buffer_push(tty
);
757 static int sci_handle_fifo_overrun(struct uart_port
*port
)
759 struct tty_struct
*tty
= port
->state
->port
.tty
;
760 struct sci_port
*s
= to_sci_port(port
);
761 struct plat_sci_reg
*reg
;
764 reg
= sci_getreg(port
, SCLSR
);
768 if ((sci_in(port
, SCLSR
) & (1 << s
->cfg
->overrun_bit
))) {
769 sci_out(port
, SCLSR
, 0);
771 tty_insert_flip_char(tty
, 0, TTY_OVERRUN
);
772 tty_flip_buffer_push(tty
);
774 dev_notice(port
->dev
, "overrun error\n");
781 static int sci_handle_breaks(struct uart_port
*port
)
784 unsigned short status
= sci_in(port
, SCxSR
);
785 struct tty_struct
*tty
= port
->state
->port
.tty
;
786 struct sci_port
*s
= to_sci_port(port
);
788 if (uart_handle_break(port
))
791 if (!s
->break_flag
&& status
& SCxSR_BRK(port
)) {
792 #if defined(CONFIG_CPU_SH3)
796 /* Notify of BREAK */
797 if (tty_insert_flip_char(tty
, 0, TTY_BREAK
))
800 dev_dbg(port
->dev
, "BREAK detected\n");
804 tty_flip_buffer_push(tty
);
806 copied
+= sci_handle_fifo_overrun(port
);
811 static irqreturn_t
sci_rx_interrupt(int irq
, void *ptr
)
813 #ifdef CONFIG_SERIAL_SH_SCI_DMA
814 struct uart_port
*port
= ptr
;
815 struct sci_port
*s
= to_sci_port(port
);
818 u16 scr
= sci_in(port
, SCSCR
);
819 u16 ssr
= sci_in(port
, SCxSR
);
821 /* Disable future Rx interrupts */
822 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
823 disable_irq_nosync(irq
);
828 sci_out(port
, SCSCR
, scr
);
829 /* Clear current interrupt */
830 sci_out(port
, SCxSR
, ssr
& ~(1 | SCxSR_RDxF(port
)));
831 dev_dbg(port
->dev
, "Rx IRQ %lu: setup t-out in %u jiffies\n",
832 jiffies
, s
->rx_timeout
);
833 mod_timer(&s
->rx_timer
, jiffies
+ s
->rx_timeout
);
839 /* I think sci_receive_chars has to be called irrespective
840 * of whether the I_IXOFF is set, otherwise, how is the interrupt
843 sci_receive_chars(ptr
);
848 static irqreturn_t
sci_tx_interrupt(int irq
, void *ptr
)
850 struct uart_port
*port
= ptr
;
853 spin_lock_irqsave(&port
->lock
, flags
);
854 sci_transmit_chars(port
);
855 spin_unlock_irqrestore(&port
->lock
, flags
);
860 static irqreturn_t
sci_er_interrupt(int irq
, void *ptr
)
862 struct uart_port
*port
= ptr
;
865 if (port
->type
== PORT_SCI
) {
866 if (sci_handle_errors(port
)) {
867 /* discard character in rx buffer */
869 sci_out(port
, SCxSR
, SCxSR_RDxF_CLEAR(port
));
872 sci_handle_fifo_overrun(port
);
873 sci_rx_interrupt(irq
, ptr
);
876 sci_out(port
, SCxSR
, SCxSR_ERROR_CLEAR(port
));
878 /* Kick the transmission */
879 sci_tx_interrupt(irq
, ptr
);
884 static irqreturn_t
sci_br_interrupt(int irq
, void *ptr
)
886 struct uart_port
*port
= ptr
;
889 sci_handle_breaks(port
);
890 sci_out(port
, SCxSR
, SCxSR_BREAK_CLEAR(port
));
895 static inline unsigned long port_rx_irq_mask(struct uart_port
*port
)
898 * Not all ports (such as SCIFA) will support REIE. Rather than
899 * special-casing the port type, we check the port initialization
900 * IRQ enable mask to see whether the IRQ is desired at all. If
901 * it's unset, it's logically inferred that there's no point in
904 return SCSCR_RIE
| (to_sci_port(port
)->cfg
->scscr
& SCSCR_REIE
);
907 static irqreturn_t
sci_mpxed_interrupt(int irq
, void *ptr
)
909 unsigned short ssr_status
, scr_status
, err_enabled
;
910 struct uart_port
*port
= ptr
;
911 struct sci_port
*s
= to_sci_port(port
);
912 irqreturn_t ret
= IRQ_NONE
;
914 ssr_status
= sci_in(port
, SCxSR
);
915 scr_status
= sci_in(port
, SCSCR
);
916 err_enabled
= scr_status
& port_rx_irq_mask(port
);
919 if ((ssr_status
& SCxSR_TDxE(port
)) && (scr_status
& SCSCR_TIE
) &&
921 ret
= sci_tx_interrupt(irq
, ptr
);
924 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
927 if (((ssr_status
& SCxSR_RDxF(port
)) || s
->chan_rx
) &&
928 (scr_status
& SCSCR_RIE
))
929 ret
= sci_rx_interrupt(irq
, ptr
);
931 /* Error Interrupt */
932 if ((ssr_status
& SCxSR_ERRORS(port
)) && err_enabled
)
933 ret
= sci_er_interrupt(irq
, ptr
);
935 /* Break Interrupt */
936 if ((ssr_status
& SCxSR_BRK(port
)) && err_enabled
)
937 ret
= sci_br_interrupt(irq
, ptr
);
943 * Here we define a transition notifier so that we can update all of our
944 * ports' baud rate when the peripheral clock changes.
946 static int sci_notifier(struct notifier_block
*self
,
947 unsigned long phase
, void *p
)
949 struct sci_port
*sci_port
;
952 sci_port
= container_of(self
, struct sci_port
, freq_transition
);
954 if ((phase
== CPUFREQ_POSTCHANGE
) ||
955 (phase
== CPUFREQ_RESUMECHANGE
)) {
956 struct uart_port
*port
= &sci_port
->port
;
958 spin_lock_irqsave(&port
->lock
, flags
);
959 port
->uartclk
= clk_get_rate(sci_port
->iclk
);
960 spin_unlock_irqrestore(&port
->lock
, flags
);
966 static struct sci_irq_desc
{
968 irq_handler_t handler
;
971 * Split out handlers, the default case.
975 .handler
= sci_er_interrupt
,
980 .handler
= sci_rx_interrupt
,
985 .handler
= sci_tx_interrupt
,
990 .handler
= sci_br_interrupt
,
994 * Special muxed handler.
998 .handler
= sci_mpxed_interrupt
,
1002 static int sci_request_irq(struct sci_port
*port
)
1004 struct uart_port
*up
= &port
->port
;
1007 for (i
= j
= 0; i
< SCIx_NR_IRQS
; i
++, j
++) {
1008 struct sci_irq_desc
*desc
;
1011 if (SCIx_IRQ_IS_MUXED(port
)) {
1015 irq
= port
->cfg
->irqs
[i
];
1017 desc
= sci_irq_desc
+ i
;
1018 port
->irqstr
[j
] = kasprintf(GFP_KERNEL
, "%s:%s",
1019 dev_name(up
->dev
), desc
->desc
);
1020 if (!port
->irqstr
[j
]) {
1021 dev_err(up
->dev
, "Failed to allocate %s IRQ string\n",
1026 ret
= request_irq(irq
, desc
->handler
, up
->irqflags
,
1027 port
->irqstr
[j
], port
);
1028 if (unlikely(ret
)) {
1029 dev_err(up
->dev
, "Can't allocate %s IRQ\n", desc
->desc
);
1038 free_irq(port
->cfg
->irqs
[i
], port
);
1042 kfree(port
->irqstr
[j
]);
1047 static void sci_free_irq(struct sci_port
*port
)
1052 * Intentionally in reverse order so we iterate over the muxed
1055 for (i
= 0; i
< SCIx_NR_IRQS
; i
++) {
1056 free_irq(port
->cfg
->irqs
[i
], port
);
1057 kfree(port
->irqstr
[i
]);
1059 if (SCIx_IRQ_IS_MUXED(port
)) {
1060 /* If there's only one IRQ, we're done. */
1066 static unsigned int sci_tx_empty(struct uart_port
*port
)
1068 unsigned short status
= sci_in(port
, SCxSR
);
1069 unsigned short in_tx_fifo
= sci_txfill(port
);
1071 return (status
& SCxSR_TEND(port
)) && !in_tx_fifo
? TIOCSER_TEMT
: 0;
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* This routine is used for seting signals of: DTR, DCD, CTS/RTS */
	/* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
	/* If you have signals for DTR and DCD, please implement here. */
}
1081 static unsigned int sci_get_mctrl(struct uart_port
*port
)
1083 /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
1086 return TIOCM_DTR
| TIOCM_RTS
| TIOCM_CTS
| TIOCM_DSR
;
1089 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1090 static void sci_dma_tx_complete(void *arg
)
1092 struct sci_port
*s
= arg
;
1093 struct uart_port
*port
= &s
->port
;
1094 struct circ_buf
*xmit
= &port
->state
->xmit
;
1095 unsigned long flags
;
1097 dev_dbg(port
->dev
, "%s(%d)\n", __func__
, port
->line
);
1099 spin_lock_irqsave(&port
->lock
, flags
);
1101 xmit
->tail
+= sg_dma_len(&s
->sg_tx
);
1102 xmit
->tail
&= UART_XMIT_SIZE
- 1;
1104 port
->icount
.tx
+= sg_dma_len(&s
->sg_tx
);
1106 async_tx_ack(s
->desc_tx
);
1107 s
->cookie_tx
= -EINVAL
;
1110 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
1111 uart_write_wakeup(port
);
1113 if (!uart_circ_empty(xmit
)) {
1114 schedule_work(&s
->work_tx
);
1115 } else if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
1116 u16 ctrl
= sci_in(port
, SCSCR
);
1117 sci_out(port
, SCSCR
, ctrl
& ~SCSCR_TIE
);
1120 spin_unlock_irqrestore(&port
->lock
, flags
);
1123 /* Locking: called with port lock held */
1124 static int sci_dma_rx_push(struct sci_port
*s
, struct tty_struct
*tty
,
1127 struct uart_port
*port
= &s
->port
;
1128 int i
, active
, room
;
1130 room
= tty_buffer_request_room(tty
, count
);
1132 if (s
->active_rx
== s
->cookie_rx
[0]) {
1134 } else if (s
->active_rx
== s
->cookie_rx
[1]) {
1137 dev_err(port
->dev
, "cookie %d not found!\n", s
->active_rx
);
1142 dev_warn(port
->dev
, "Rx overrun: dropping %u bytes\n",
1147 for (i
= 0; i
< room
; i
++)
1148 tty_insert_flip_char(tty
, ((u8
*)sg_virt(&s
->sg_rx
[active
]))[i
],
1151 port
->icount
.rx
+= room
;
1156 static void sci_dma_rx_complete(void *arg
)
1158 struct sci_port
*s
= arg
;
1159 struct uart_port
*port
= &s
->port
;
1160 struct tty_struct
*tty
= port
->state
->port
.tty
;
1161 unsigned long flags
;
1164 dev_dbg(port
->dev
, "%s(%d) active #%d\n", __func__
, port
->line
, s
->active_rx
);
1166 spin_lock_irqsave(&port
->lock
, flags
);
1168 count
= sci_dma_rx_push(s
, tty
, s
->buf_len_rx
);
1170 mod_timer(&s
->rx_timer
, jiffies
+ s
->rx_timeout
);
1172 spin_unlock_irqrestore(&port
->lock
, flags
);
1175 tty_flip_buffer_push(tty
);
1177 schedule_work(&s
->work_rx
);
1180 static void sci_rx_dma_release(struct sci_port
*s
, bool enable_pio
)
1182 struct dma_chan
*chan
= s
->chan_rx
;
1183 struct uart_port
*port
= &s
->port
;
1186 s
->cookie_rx
[0] = s
->cookie_rx
[1] = -EINVAL
;
1187 dma_release_channel(chan
);
1188 if (sg_dma_address(&s
->sg_rx
[0]))
1189 dma_free_coherent(port
->dev
, s
->buf_len_rx
* 2,
1190 sg_virt(&s
->sg_rx
[0]), sg_dma_address(&s
->sg_rx
[0]));
1195 static void sci_tx_dma_release(struct sci_port
*s
, bool enable_pio
)
1197 struct dma_chan
*chan
= s
->chan_tx
;
1198 struct uart_port
*port
= &s
->port
;
1201 s
->cookie_tx
= -EINVAL
;
1202 dma_release_channel(chan
);
1207 static void sci_submit_rx(struct sci_port
*s
)
1209 struct dma_chan
*chan
= s
->chan_rx
;
1212 for (i
= 0; i
< 2; i
++) {
1213 struct scatterlist
*sg
= &s
->sg_rx
[i
];
1214 struct dma_async_tx_descriptor
*desc
;
1216 desc
= chan
->device
->device_prep_slave_sg(chan
,
1217 sg
, 1, DMA_FROM_DEVICE
, DMA_PREP_INTERRUPT
);
1220 s
->desc_rx
[i
] = desc
;
1221 desc
->callback
= sci_dma_rx_complete
;
1222 desc
->callback_param
= s
;
1223 s
->cookie_rx
[i
] = desc
->tx_submit(desc
);
1226 if (!desc
|| s
->cookie_rx
[i
] < 0) {
1228 async_tx_ack(s
->desc_rx
[0]);
1229 s
->cookie_rx
[0] = -EINVAL
;
1233 s
->cookie_rx
[i
] = -EINVAL
;
1235 dev_warn(s
->port
.dev
,
1236 "failed to re-start DMA, using PIO\n");
1237 sci_rx_dma_release(s
, true);
1240 dev_dbg(s
->port
.dev
, "%s(): cookie %d to #%d\n", __func__
,
1241 s
->cookie_rx
[i
], i
);
1244 s
->active_rx
= s
->cookie_rx
[0];
1246 dma_async_issue_pending(chan
);
1249 static void work_fn_rx(struct work_struct
*work
)
1251 struct sci_port
*s
= container_of(work
, struct sci_port
, work_rx
);
1252 struct uart_port
*port
= &s
->port
;
1253 struct dma_async_tx_descriptor
*desc
;
1256 if (s
->active_rx
== s
->cookie_rx
[0]) {
1258 } else if (s
->active_rx
== s
->cookie_rx
[1]) {
1261 dev_err(port
->dev
, "cookie %d not found!\n", s
->active_rx
);
1264 desc
= s
->desc_rx
[new];
1266 if (dma_async_is_tx_complete(s
->chan_rx
, s
->active_rx
, NULL
, NULL
) !=
1268 /* Handle incomplete DMA receive */
1269 struct tty_struct
*tty
= port
->state
->port
.tty
;
1270 struct dma_chan
*chan
= s
->chan_rx
;
1271 struct sh_desc
*sh_desc
= container_of(desc
, struct sh_desc
,
1273 unsigned long flags
;
1276 chan
->device
->device_control(chan
, DMA_TERMINATE_ALL
, 0);
1277 dev_dbg(port
->dev
, "Read %u bytes with cookie %d\n",
1278 sh_desc
->partial
, sh_desc
->cookie
);
1280 spin_lock_irqsave(&port
->lock
, flags
);
1281 count
= sci_dma_rx_push(s
, tty
, sh_desc
->partial
);
1282 spin_unlock_irqrestore(&port
->lock
, flags
);
1285 tty_flip_buffer_push(tty
);
1292 s
->cookie_rx
[new] = desc
->tx_submit(desc
);
1293 if (s
->cookie_rx
[new] < 0) {
1294 dev_warn(port
->dev
, "Failed submitting Rx DMA descriptor\n");
1295 sci_rx_dma_release(s
, true);
1299 s
->active_rx
= s
->cookie_rx
[!new];
1301 dev_dbg(port
->dev
, "%s: cookie %d #%d, new active #%d\n", __func__
,
1302 s
->cookie_rx
[new], new, s
->active_rx
);
1305 static void work_fn_tx(struct work_struct
*work
)
1307 struct sci_port
*s
= container_of(work
, struct sci_port
, work_tx
);
1308 struct dma_async_tx_descriptor
*desc
;
1309 struct dma_chan
*chan
= s
->chan_tx
;
1310 struct uart_port
*port
= &s
->port
;
1311 struct circ_buf
*xmit
= &port
->state
->xmit
;
1312 struct scatterlist
*sg
= &s
->sg_tx
;
1316 * Port xmit buffer is already mapped, and it is one page... Just adjust
1317 * offsets and lengths. Since it is a circular buffer, we have to
1318 * transmit till the end, and then the rest. Take the port lock to get a
1319 * consistent xmit buffer state.
1321 spin_lock_irq(&port
->lock
);
1322 sg
->offset
= xmit
->tail
& (UART_XMIT_SIZE
- 1);
1323 sg_dma_address(sg
) = (sg_dma_address(sg
) & ~(UART_XMIT_SIZE
- 1)) +
1325 sg_dma_len(sg
) = min((int)CIRC_CNT(xmit
->head
, xmit
->tail
, UART_XMIT_SIZE
),
1326 CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
, UART_XMIT_SIZE
));
1327 spin_unlock_irq(&port
->lock
);
1329 BUG_ON(!sg_dma_len(sg
));
1331 desc
= chan
->device
->device_prep_slave_sg(chan
,
1332 sg
, s
->sg_len_tx
, DMA_TO_DEVICE
,
1333 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
1336 sci_tx_dma_release(s
, true);
1340 dma_sync_sg_for_device(port
->dev
, sg
, 1, DMA_TO_DEVICE
);
1342 spin_lock_irq(&port
->lock
);
1344 desc
->callback
= sci_dma_tx_complete
;
1345 desc
->callback_param
= s
;
1346 spin_unlock_irq(&port
->lock
);
1347 s
->cookie_tx
= desc
->tx_submit(desc
);
1348 if (s
->cookie_tx
< 0) {
1349 dev_warn(port
->dev
, "Failed submitting Tx DMA descriptor\n");
1351 sci_tx_dma_release(s
, true);
1355 dev_dbg(port
->dev
, "%s: %p: %d...%d, cookie %d\n", __func__
,
1356 xmit
->buf
, xmit
->tail
, xmit
->head
, s
->cookie_tx
);
1358 dma_async_issue_pending(chan
);
1362 static void sci_start_tx(struct uart_port
*port
)
1364 struct sci_port
*s
= to_sci_port(port
);
1365 unsigned short ctrl
;
1367 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1368 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
1369 u16
new, scr
= sci_in(port
, SCSCR
);
1373 new = scr
& ~0x8000;
1375 sci_out(port
, SCSCR
, new);
1378 if (s
->chan_tx
&& !uart_circ_empty(&s
->port
.state
->xmit
) &&
1380 schedule_work(&s
->work_tx
);
1383 if (!s
->chan_tx
|| port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
1384 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
1385 ctrl
= sci_in(port
, SCSCR
);
1386 sci_out(port
, SCSCR
, ctrl
| SCSCR_TIE
);
1390 static void sci_stop_tx(struct uart_port
*port
)
1392 unsigned short ctrl
;
1394 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1395 ctrl
= sci_in(port
, SCSCR
);
1397 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
)
1402 sci_out(port
, SCSCR
, ctrl
);
1405 static void sci_start_rx(struct uart_port
*port
)
1407 unsigned short ctrl
;
1409 ctrl
= sci_in(port
, SCSCR
) | port_rx_irq_mask(port
);
1411 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
)
1414 sci_out(port
, SCSCR
, ctrl
);
1417 static void sci_stop_rx(struct uart_port
*port
)
1419 unsigned short ctrl
;
1421 ctrl
= sci_in(port
, SCSCR
);
1423 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
)
1426 ctrl
&= ~port_rx_irq_mask(port
);
1428 sci_out(port
, SCSCR
, ctrl
);
/* uart_ops.enable_ms: modem-status interrupts are not implemented. */
static void sci_enable_ms(struct uart_port *port)
{
	/* Nothing here yet .. */
}
/* uart_ops.break_ctl: break signalling is not implemented. */
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	/* Nothing here yet .. */
}
1441 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1442 static bool filter(struct dma_chan
*chan
, void *slave
)
1444 struct sh_dmae_slave
*param
= slave
;
1446 dev_dbg(chan
->device
->dev
, "%s: slave ID %d\n", __func__
,
1449 if (param
->dma_dev
== chan
->device
->dev
) {
1450 chan
->private = param
;
1457 static void rx_timer_fn(unsigned long arg
)
1459 struct sci_port
*s
= (struct sci_port
*)arg
;
1460 struct uart_port
*port
= &s
->port
;
1461 u16 scr
= sci_in(port
, SCSCR
);
1463 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
1465 enable_irq(s
->cfg
->irqs
[1]);
1467 sci_out(port
, SCSCR
, scr
| SCSCR_RIE
);
1468 dev_dbg(port
->dev
, "DMA Rx timed out\n");
1469 schedule_work(&s
->work_rx
);
1472 static void sci_request_dma(struct uart_port
*port
)
1474 struct sci_port
*s
= to_sci_port(port
);
1475 struct sh_dmae_slave
*param
;
1476 struct dma_chan
*chan
;
1477 dma_cap_mask_t mask
;
1480 dev_dbg(port
->dev
, "%s: port %d DMA %p\n", __func__
,
1481 port
->line
, s
->cfg
->dma_dev
);
1483 if (!s
->cfg
->dma_dev
)
1487 dma_cap_set(DMA_SLAVE
, mask
);
1489 param
= &s
->param_tx
;
1491 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
1492 param
->slave_id
= s
->cfg
->dma_slave_tx
;
1493 param
->dma_dev
= s
->cfg
->dma_dev
;
1495 s
->cookie_tx
= -EINVAL
;
1496 chan
= dma_request_channel(mask
, filter
, param
);
1497 dev_dbg(port
->dev
, "%s: TX: got channel %p\n", __func__
, chan
);
1500 sg_init_table(&s
->sg_tx
, 1);
1501 /* UART circular tx buffer is an aligned page. */
1502 BUG_ON((int)port
->state
->xmit
.buf
& ~PAGE_MASK
);
1503 sg_set_page(&s
->sg_tx
, virt_to_page(port
->state
->xmit
.buf
),
1504 UART_XMIT_SIZE
, (int)port
->state
->xmit
.buf
& ~PAGE_MASK
);
1505 nent
= dma_map_sg(port
->dev
, &s
->sg_tx
, 1, DMA_TO_DEVICE
);
1507 sci_tx_dma_release(s
, false);
1509 dev_dbg(port
->dev
, "%s: mapped %d@%p to %x\n", __func__
,
1510 sg_dma_len(&s
->sg_tx
),
1511 port
->state
->xmit
.buf
, sg_dma_address(&s
->sg_tx
));
1513 s
->sg_len_tx
= nent
;
1515 INIT_WORK(&s
->work_tx
, work_fn_tx
);
1518 param
= &s
->param_rx
;
1520 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
1521 param
->slave_id
= s
->cfg
->dma_slave_rx
;
1522 param
->dma_dev
= s
->cfg
->dma_dev
;
1524 chan
= dma_request_channel(mask
, filter
, param
);
1525 dev_dbg(port
->dev
, "%s: RX: got channel %p\n", __func__
, chan
);
1533 s
->buf_len_rx
= 2 * max(16, (int)port
->fifosize
);
1534 buf
[0] = dma_alloc_coherent(port
->dev
, s
->buf_len_rx
* 2,
1535 &dma
[0], GFP_KERNEL
);
1539 "failed to allocate dma buffer, using PIO\n");
1540 sci_rx_dma_release(s
, true);
1544 buf
[1] = buf
[0] + s
->buf_len_rx
;
1545 dma
[1] = dma
[0] + s
->buf_len_rx
;
1547 for (i
= 0; i
< 2; i
++) {
1548 struct scatterlist
*sg
= &s
->sg_rx
[i
];
1550 sg_init_table(sg
, 1);
1551 sg_set_page(sg
, virt_to_page(buf
[i
]), s
->buf_len_rx
,
1552 (int)buf
[i
] & ~PAGE_MASK
);
1553 sg_dma_address(sg
) = dma
[i
];
1556 INIT_WORK(&s
->work_rx
, work_fn_rx
);
1557 setup_timer(&s
->rx_timer
, rx_timer_fn
, (unsigned long)s
);
1563 static void sci_free_dma(struct uart_port
*port
)
1565 struct sci_port
*s
= to_sci_port(port
);
1567 if (!s
->cfg
->dma_dev
)
1571 sci_tx_dma_release(s
, false);
1573 sci_rx_dma_release(s
, false);
1576 static inline void sci_request_dma(struct uart_port
*port
)
1580 static inline void sci_free_dma(struct uart_port
*port
)
1585 static int sci_startup(struct uart_port
*port
)
1587 struct sci_port
*s
= to_sci_port(port
);
1590 dev_dbg(port
->dev
, "%s(%d)\n", __func__
, port
->line
);
1594 ret
= sci_request_irq(s
);
1595 if (unlikely(ret
< 0))
1598 sci_request_dma(port
);
1606 static void sci_shutdown(struct uart_port
*port
)
1608 struct sci_port
*s
= to_sci_port(port
);
1610 dev_dbg(port
->dev
, "%s(%d)\n", __func__
, port
->line
);
1618 sci_port_disable(s
);
1621 static unsigned int sci_scbrr_calc(unsigned int algo_id
, unsigned int bps
,
1626 return ((freq
+ 16 * bps
) / (16 * bps
) - 1);
1628 return ((freq
+ 16 * bps
) / (32 * bps
) - 1);
1630 return (((freq
* 2) + 16 * bps
) / (16 * bps
) - 1);
1632 return (((freq
* 2) + 16 * bps
) / (32 * bps
) - 1);
1634 return (((freq
* 1000 / 32) / bps
) - 1);
1637 /* Warn, but use a safe default */
1640 return ((freq
+ 16 * bps
) / (32 * bps
) - 1);
1643 static void sci_reset(struct uart_port
*port
)
1645 unsigned int status
;
1648 status
= sci_in(port
, SCxSR
);
1649 } while (!(status
& SCxSR_TEND(port
)));
1651 sci_out(port
, SCSCR
, 0x00); /* TE=0, RE=0, CKE1=0 */
1653 if (port
->type
!= PORT_SCI
)
1654 sci_out(port
, SCFCR
, SCFCR_RFRST
| SCFCR_TFRST
);
1657 static void sci_set_termios(struct uart_port
*port
, struct ktermios
*termios
,
1658 struct ktermios
*old
)
1660 struct sci_port
*s
= to_sci_port(port
);
1661 unsigned int baud
, smr_val
, max_baud
;
1666 * earlyprintk comes here early on with port->uartclk set to zero.
1667 * the clock framework is not up and running at this point so here
1668 * we assume that 115200 is the maximum baud rate. please note that
1669 * the baud rate is not programmed during earlyprintk - it is assumed
1670 * that the previous boot loader has enabled required clocks and
1671 * setup the baud rate generator hardware for us already.
1673 max_baud
= port
->uartclk
? port
->uartclk
/ 16 : 115200;
1675 baud
= uart_get_baud_rate(port
, termios
, old
, 0, max_baud
);
1676 if (likely(baud
&& port
->uartclk
))
1677 t
= sci_scbrr_calc(s
->cfg
->scbrr_algo_id
, baud
, port
->uartclk
);
1683 smr_val
= sci_in(port
, SCSMR
) & 3;
1685 if ((termios
->c_cflag
& CSIZE
) == CS7
)
1687 if (termios
->c_cflag
& PARENB
)
1689 if (termios
->c_cflag
& PARODD
)
1691 if (termios
->c_cflag
& CSTOPB
)
1694 uart_update_timeout(port
, termios
->c_cflag
, baud
);
1696 sci_out(port
, SCSMR
, smr_val
);
1698 dev_dbg(port
->dev
, "%s: SMR %x, t %x, SCSCR %x\n", __func__
, smr_val
, t
,
1703 sci_out(port
, SCSMR
, (sci_in(port
, SCSMR
) & ~3) | 1);
1706 sci_out(port
, SCSMR
, sci_in(port
, SCSMR
) & ~3);
1708 sci_out(port
, SCBRR
, t
);
1709 udelay((1000000+(baud
-1)) / baud
); /* Wait one bit interval */
1712 sci_init_pins(port
, termios
->c_cflag
);
1713 sci_out(port
, SCFCR
, scfcr
| ((termios
->c_cflag
& CRTSCTS
) ? SCFCR_MCE
: 0));
1715 sci_out(port
, SCSCR
, s
->cfg
->scscr
);
1717 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1719 * Calculate delay for 1.5 DMA buffers: see
1720 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
1721 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
1722 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
1723 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
1724 * sizes), but it has been found out experimentally, that this is not
1725 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
1726 * as a minimum seem to work perfectly.
1729 s
->rx_timeout
= (port
->timeout
- HZ
/ 50) * s
->buf_len_rx
* 3 /
1732 "DMA Rx t-out %ums, tty t-out %u jiffies\n",
1733 s
->rx_timeout
* 1000 / HZ
, port
->timeout
);
1734 if (s
->rx_timeout
< msecs_to_jiffies(20))
1735 s
->rx_timeout
= msecs_to_jiffies(20);
1739 if ((termios
->c_cflag
& CREAD
) != 0)
1742 sci_port_disable(s
);
1745 static const char *sci_type(struct uart_port
*port
)
1747 switch (port
->type
) {
/* Size of the register window claimed/remapped for a port. */
static inline unsigned long sci_port_size(struct uart_port *port)
{
	/*
	 * Pick an arbitrary size that encapsulates all of the base
	 * registers by default. This can be optimized later, or derived
	 * from platform resource data at such a time that ports begin to
	 * behave more erratically.
	 */
	return 64;
}
1774 static int sci_remap_port(struct uart_port
*port
)
1776 unsigned long size
= sci_port_size(port
);
1779 * Nothing to do if there's already an established membase.
1784 if (port
->flags
& UPF_IOREMAP
) {
1785 port
->membase
= ioremap_nocache(port
->mapbase
, size
);
1786 if (unlikely(!port
->membase
)) {
1787 dev_err(port
->dev
, "can't remap port#%d\n", port
->line
);
1792 * For the simple (and majority of) cases where we don't
1793 * need to do any remapping, just cast the cookie
1796 port
->membase
= (void __iomem
*)port
->mapbase
;
1802 static void sci_release_port(struct uart_port
*port
)
1804 if (port
->flags
& UPF_IOREMAP
) {
1805 iounmap(port
->membase
);
1806 port
->membase
= NULL
;
1809 release_mem_region(port
->mapbase
, sci_port_size(port
));
1812 static int sci_request_port(struct uart_port
*port
)
1814 unsigned long size
= sci_port_size(port
);
1815 struct resource
*res
;
1818 res
= request_mem_region(port
->mapbase
, size
, dev_name(port
->dev
));
1819 if (unlikely(res
== NULL
))
1822 ret
= sci_remap_port(port
);
1823 if (unlikely(ret
!= 0)) {
1824 release_resource(res
);
1831 static void sci_config_port(struct uart_port
*port
, int flags
)
1833 if (flags
& UART_CONFIG_TYPE
) {
1834 struct sci_port
*sport
= to_sci_port(port
);
1836 port
->type
= sport
->cfg
->type
;
1837 sci_request_port(port
);
1841 static int sci_verify_port(struct uart_port
*port
, struct serial_struct
*ser
)
1843 struct sci_port
*s
= to_sci_port(port
);
1845 if (ser
->irq
!= s
->cfg
->irqs
[SCIx_TXI_IRQ
] || ser
->irq
> nr_irqs
)
1847 if (ser
->baud_base
< 2400)
1848 /* No paper tape reader for Mitch.. */
1854 static struct uart_ops sci_uart_ops
= {
1855 .tx_empty
= sci_tx_empty
,
1856 .set_mctrl
= sci_set_mctrl
,
1857 .get_mctrl
= sci_get_mctrl
,
1858 .start_tx
= sci_start_tx
,
1859 .stop_tx
= sci_stop_tx
,
1860 .stop_rx
= sci_stop_rx
,
1861 .enable_ms
= sci_enable_ms
,
1862 .break_ctl
= sci_break_ctl
,
1863 .startup
= sci_startup
,
1864 .shutdown
= sci_shutdown
,
1865 .set_termios
= sci_set_termios
,
1867 .release_port
= sci_release_port
,
1868 .request_port
= sci_request_port
,
1869 .config_port
= sci_config_port
,
1870 .verify_port
= sci_verify_port
,
1871 #ifdef CONFIG_CONSOLE_POLL
1872 .poll_get_char
= sci_poll_get_char
,
1873 .poll_put_char
= sci_poll_put_char
,
1877 static int __devinit
sci_init_single(struct platform_device
*dev
,
1878 struct sci_port
*sci_port
,
1880 struct plat_sci_port
*p
)
1882 struct uart_port
*port
= &sci_port
->port
;
1885 port
->ops
= &sci_uart_ops
;
1886 port
->iotype
= UPIO_MEM
;
1891 port
->fifosize
= 256;
1894 port
->fifosize
= 64;
1897 port
->fifosize
= 16;
1904 if (p
->regtype
== SCIx_PROBE_REGTYPE
) {
1905 ret
= sci_probe_regmap(p
);
1911 sci_port
->iclk
= clk_get(&dev
->dev
, "sci_ick");
1912 if (IS_ERR(sci_port
->iclk
)) {
1913 sci_port
->iclk
= clk_get(&dev
->dev
, "peripheral_clk");
1914 if (IS_ERR(sci_port
->iclk
)) {
1915 dev_err(&dev
->dev
, "can't get iclk\n");
1916 return PTR_ERR(sci_port
->iclk
);
1921 * The function clock is optional, ignore it if we can't
1924 sci_port
->fclk
= clk_get(&dev
->dev
, "sci_fck");
1925 if (IS_ERR(sci_port
->fclk
))
1926 sci_port
->fclk
= NULL
;
1928 port
->dev
= &dev
->dev
;
1930 pm_runtime_irq_safe(&dev
->dev
);
1931 pm_runtime_enable(&dev
->dev
);
1934 sci_port
->break_timer
.data
= (unsigned long)sci_port
;
1935 sci_port
->break_timer
.function
= sci_break_timer
;
1936 init_timer(&sci_port
->break_timer
);
1939 * Establish some sensible defaults for the error detection.
1942 p
->error_mask
= (p
->type
== PORT_SCI
) ?
1943 SCI_DEFAULT_ERROR_MASK
: SCIF_DEFAULT_ERROR_MASK
;
1946 * Establish sensible defaults for the overrun detection, unless
1947 * the part has explicitly disabled support for it.
1949 if (p
->overrun_bit
!= SCIx_NOT_SUPPORTED
) {
1950 if (p
->type
== PORT_SCI
)
1952 else if (p
->scbrr_algo_id
== SCBRR_ALGO_4
)
1958 * Make the error mask inclusive of overrun detection, if
1961 p
->error_mask
|= (1 << p
->overrun_bit
);
1966 port
->mapbase
= p
->mapbase
;
1967 port
->type
= p
->type
;
1968 port
->flags
= p
->flags
;
1969 port
->regshift
= p
->regshift
;
1972 * The UART port needs an IRQ value, so we peg this to the RX IRQ
1973 * for the multi-IRQ ports, which is where we are primarily
1974 * concerned with the shutdown path synchronization.
1976 * For the muxed case there's nothing more to do.
1978 port
->irq
= p
->irqs
[SCIx_RXI_IRQ
];
1979 port
->irqflags
= IRQF_DISABLED
;
1981 port
->serial_in
= sci_serial_in
;
1982 port
->serial_out
= sci_serial_out
;
1985 dev_dbg(port
->dev
, "DMA device %p, tx %d, rx %d\n",
1986 p
->dma_dev
, p
->dma_slave_tx
, p
->dma_slave_rx
);
1991 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
1992 static void serial_console_putchar(struct uart_port
*port
, int ch
)
1994 sci_poll_put_char(port
, ch
);
1998 * Print a string to the serial port trying not to disturb
1999 * any possible real use of the port...
2001 static void serial_console_write(struct console
*co
, const char *s
,
2004 struct sci_port
*sci_port
= &sci_ports
[co
->index
];
2005 struct uart_port
*port
= &sci_port
->port
;
2006 unsigned short bits
;
2008 sci_port_enable(sci_port
);
2010 uart_console_write(port
, s
, count
, serial_console_putchar
);
2012 /* wait until fifo is empty and last bit has been transmitted */
2013 bits
= SCxSR_TDxE(port
) | SCxSR_TEND(port
);
2014 while ((sci_in(port
, SCxSR
) & bits
) != bits
)
2017 sci_port_disable(sci_port
);
2020 static int __devinit
serial_console_setup(struct console
*co
, char *options
)
2022 struct sci_port
*sci_port
;
2023 struct uart_port
*port
;
2031 * Refuse to handle any bogus ports.
2033 if (co
->index
< 0 || co
->index
>= SCI_NPORTS
)
2036 sci_port
= &sci_ports
[co
->index
];
2037 port
= &sci_port
->port
;
2040 * Refuse to handle uninitialized ports.
2045 ret
= sci_remap_port(port
);
2046 if (unlikely(ret
!= 0))
2049 sci_port_enable(sci_port
);
2052 uart_parse_options(options
, &baud
, &parity
, &bits
, &flow
);
2054 sci_port_disable(sci_port
);
2056 return uart_set_options(port
, co
, baud
, parity
, bits
, flow
);
2059 static struct console serial_console
= {
2061 .device
= uart_console_device
,
2062 .write
= serial_console_write
,
2063 .setup
= serial_console_setup
,
2064 .flags
= CON_PRINTBUFFER
,
2066 .data
= &sci_uart_driver
,
2069 static struct console early_serial_console
= {
2070 .name
= "early_ttySC",
2071 .write
= serial_console_write
,
2072 .flags
= CON_PRINTBUFFER
,
/* Buffer that early_platform_init_buffer() fills with earlyprintk options. */
static char early_serial_buf[32];
2078 static int __devinit
sci_probe_earlyprintk(struct platform_device
*pdev
)
2080 struct plat_sci_port
*cfg
= pdev
->dev
.platform_data
;
2082 if (early_serial_console
.data
)
2085 early_serial_console
.index
= pdev
->id
;
2087 sci_init_single(NULL
, &sci_ports
[pdev
->id
], pdev
->id
, cfg
);
2089 serial_console_setup(&early_serial_console
, early_serial_buf
);
2091 if (!strstr(early_serial_buf
, "keep"))
2092 early_serial_console
.flags
|= CON_BOOT
;
2094 register_console(&early_serial_console
);
2098 #define uart_console(port) ((port)->cons->index == (port)->line)
2100 static int sci_runtime_suspend(struct device
*dev
)
2102 struct sci_port
*sci_port
= dev_get_drvdata(dev
);
2103 struct uart_port
*port
= &sci_port
->port
;
2105 if (uart_console(port
)) {
2106 sci_port
->saved_smr
= sci_in(port
, SCSMR
);
2107 sci_port
->saved_brr
= sci_in(port
, SCBRR
);
2108 sci_port
->saved_fcr
= sci_in(port
, SCFCR
);
2113 static int sci_runtime_resume(struct device
*dev
)
2115 struct sci_port
*sci_port
= dev_get_drvdata(dev
);
2116 struct uart_port
*port
= &sci_port
->port
;
2118 if (uart_console(port
)) {
2120 sci_out(port
, SCSMR
, sci_port
->saved_smr
);
2121 sci_out(port
, SCBRR
, sci_port
->saved_brr
);
2122 sci_out(port
, SCFCR
, sci_port
->saved_fcr
);
2123 sci_out(port
, SCSCR
, sci_port
->cfg
->scscr
);
2128 #define SCI_CONSOLE (&serial_console)
2131 static inline int __devinit
sci_probe_earlyprintk(struct platform_device
*pdev
)
2136 #define SCI_CONSOLE NULL
2137 #define sci_runtime_suspend NULL
2138 #define sci_runtime_resume NULL
2140 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
2142 static char banner
[] __initdata
=
2143 KERN_INFO
"SuperH SCI(F) driver initialized\n";
2145 static struct uart_driver sci_uart_driver
= {
2146 .owner
= THIS_MODULE
,
2147 .driver_name
= "sci",
2148 .dev_name
= "ttySC",
2150 .minor
= SCI_MINOR_START
,
2152 .cons
= SCI_CONSOLE
,
2155 static int sci_remove(struct platform_device
*dev
)
2157 struct sci_port
*port
= platform_get_drvdata(dev
);
2159 cpufreq_unregister_notifier(&port
->freq_transition
,
2160 CPUFREQ_TRANSITION_NOTIFIER
);
2162 uart_remove_one_port(&sci_uart_driver
, &port
->port
);
2164 clk_put(port
->iclk
);
2165 clk_put(port
->fclk
);
2167 pm_runtime_disable(&dev
->dev
);
2171 static int __devinit
sci_probe_single(struct platform_device
*dev
,
2173 struct plat_sci_port
*p
,
2174 struct sci_port
*sciport
)
2179 if (unlikely(index
>= SCI_NPORTS
)) {
2180 dev_notice(&dev
->dev
, "Attempting to register port "
2181 "%d when only %d are available.\n",
2182 index
+1, SCI_NPORTS
);
2183 dev_notice(&dev
->dev
, "Consider bumping "
2184 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
2188 ret
= sci_init_single(dev
, sciport
, index
, p
);
2192 return uart_add_one_port(&sci_uart_driver
, &sciport
->port
);
2195 static int __devinit
sci_probe(struct platform_device
*dev
)
2197 struct plat_sci_port
*p
= dev
->dev
.platform_data
;
2198 struct sci_port
*sp
= &sci_ports
[dev
->id
];
2202 * If we've come here via earlyprintk initialization, head off to
2203 * the special early probe. We don't have sufficient device state
2204 * to make it beyond this yet.
2206 if (is_early_platform_device(dev
))
2207 return sci_probe_earlyprintk(dev
);
2209 platform_set_drvdata(dev
, sp
);
2211 ret
= sci_probe_single(dev
, dev
->id
, p
, sp
);
2215 sp
->freq_transition
.notifier_call
= sci_notifier
;
2217 ret
= cpufreq_register_notifier(&sp
->freq_transition
,
2218 CPUFREQ_TRANSITION_NOTIFIER
);
2219 if (unlikely(ret
< 0))
2222 #ifdef CONFIG_SH_STANDARD_BIOS
2223 sh_bios_gdb_detach();
2233 static int sci_suspend(struct device
*dev
)
2235 struct sci_port
*sport
= dev_get_drvdata(dev
);
2238 uart_suspend_port(&sci_uart_driver
, &sport
->port
);
2243 static int sci_resume(struct device
*dev
)
2245 struct sci_port
*sport
= dev_get_drvdata(dev
);
2248 uart_resume_port(&sci_uart_driver
, &sport
->port
);
2253 static const struct dev_pm_ops sci_dev_pm_ops
= {
2254 .runtime_suspend
= sci_runtime_suspend
,
2255 .runtime_resume
= sci_runtime_resume
,
2256 .suspend
= sci_suspend
,
2257 .resume
= sci_resume
,
2260 static struct platform_driver sci_driver
= {
2262 .remove
= sci_remove
,
2265 .owner
= THIS_MODULE
,
2266 .pm
= &sci_dev_pm_ops
,
2270 static int __init
sci_init(void)
2276 ret
= uart_register_driver(&sci_uart_driver
);
2277 if (likely(ret
== 0)) {
2278 ret
= platform_driver_register(&sci_driver
);
2280 uart_unregister_driver(&sci_uart_driver
);
2286 static void __exit
sci_exit(void)
2288 platform_driver_unregister(&sci_driver
);
2289 uart_unregister_driver(&sci_uart_driver
);
2292 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
2293 early_platform_init_buffer("earlyprintk", &sci_driver
,
2294 early_serial_buf
, ARRAY_SIZE(early_serial_buf
));
2296 module_init(sci_init
);
2297 module_exit(sci_exit
);
2299 MODULE_LICENSE("GPL");
2300 MODULE_ALIAS("platform:sh-sci");
2301 MODULE_AUTHOR("Paul Mundt");
2302 MODULE_DESCRIPTION("SuperH SCI(F) serial driver");