/*
 * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
 *
 * Copyright (C) 2002 - 2011  Paul Mundt
 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
 *
 * based off of the old drivers/char/sh-sci.c by:
 *
 *   Copyright (C) 1999, 2000  Niibe Yutaka
 *   Copyright (C) 2000  Sugioka Toshinobu
 *   Modified to support multiple serial ports. Stuart Menefy (May 2000).
 *   Modified to support SecureEdge. David McCullough (2002)
 *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
 *   Removed SH7300 support (Jul 2007).
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
20 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
26 #include <linux/module.h>
27 #include <linux/errno.h>
28 #include <linux/timer.h>
29 #include <linux/interrupt.h>
30 #include <linux/tty.h>
31 #include <linux/tty_flip.h>
32 #include <linux/serial.h>
33 #include <linux/major.h>
34 #include <linux/string.h>
35 #include <linux/sysrq.h>
36 #include <linux/ioport.h>
38 #include <linux/init.h>
39 #include <linux/delay.h>
40 #include <linux/console.h>
41 #include <linux/platform_device.h>
42 #include <linux/serial_sci.h>
43 #include <linux/notifier.h>
44 #include <linux/pm_runtime.h>
45 #include <linux/cpufreq.h>
46 #include <linux/clk.h>
47 #include <linux/ctype.h>
48 #include <linux/err.h>
49 #include <linux/dmaengine.h>
50 #include <linux/dma-mapping.h>
51 #include <linux/scatterlist.h>
52 #include <linux/slab.h>
55 #include <asm/sh_bios.h>
61 struct uart_port port
;
63 /* Platform configuration */
64 struct plat_sci_port
*cfg
;
67 struct timer_list break_timer
;
75 char *irqstr
[SCIx_NR_IRQS
];
77 struct dma_chan
*chan_tx
;
78 struct dma_chan
*chan_rx
;
80 #ifdef CONFIG_SERIAL_SH_SCI_DMA
81 struct dma_async_tx_descriptor
*desc_tx
;
82 struct dma_async_tx_descriptor
*desc_rx
[2];
83 dma_cookie_t cookie_tx
;
84 dma_cookie_t cookie_rx
[2];
85 dma_cookie_t active_rx
;
86 struct scatterlist sg_tx
;
87 unsigned int sg_len_tx
;
88 struct scatterlist sg_rx
[2];
90 struct sh_dmae_slave param_tx
;
91 struct sh_dmae_slave param_rx
;
92 struct work_struct work_tx
;
93 struct work_struct work_rx
;
94 struct timer_list rx_timer
;
95 unsigned int rx_timeout
;
98 struct notifier_block freq_transition
;
100 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
101 unsigned short saved_smr
;
102 unsigned short saved_fcr
;
103 unsigned char saved_brr
;
107 /* Function prototypes */
108 static void sci_start_tx(struct uart_port
*port
);
109 static void sci_stop_tx(struct uart_port
*port
);
110 static void sci_start_rx(struct uart_port
*port
);
112 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
114 static struct sci_port sci_ports
[SCI_NPORTS
];
115 static struct uart_driver sci_uart_driver
;
117 static inline struct sci_port
*
118 to_sci_port(struct uart_port
*uart
)
120 return container_of(uart
, struct sci_port
, port
);
123 struct plat_sci_reg
{
127 /* Helper for invalidating specific entries of an inherited map. */
128 #define sci_reg_invalid { .offset = 0, .size = 0 }
130 static struct plat_sci_reg sci_regmap
[SCIx_NR_REGTYPES
][SCIx_NR_REGS
] = {
131 [SCIx_PROBE_REGTYPE
] = {
132 [0 ... SCIx_NR_REGS
- 1] = sci_reg_invalid
,
136 * Common SCI definitions, dependent on the port's regshift
139 [SCIx_SCI_REGTYPE
] = {
140 [SCSMR
] = { 0x00, 8 },
141 [SCBRR
] = { 0x01, 8 },
142 [SCSCR
] = { 0x02, 8 },
143 [SCxTDR
] = { 0x03, 8 },
144 [SCxSR
] = { 0x04, 8 },
145 [SCxRDR
] = { 0x05, 8 },
146 [SCFCR
] = sci_reg_invalid
,
147 [SCFDR
] = sci_reg_invalid
,
148 [SCTFDR
] = sci_reg_invalid
,
149 [SCRFDR
] = sci_reg_invalid
,
150 [SCSPTR
] = sci_reg_invalid
,
151 [SCLSR
] = sci_reg_invalid
,
155 * Common definitions for legacy IrDA ports, dependent on
158 [SCIx_IRDA_REGTYPE
] = {
159 [SCSMR
] = { 0x00, 8 },
160 [SCBRR
] = { 0x01, 8 },
161 [SCSCR
] = { 0x02, 8 },
162 [SCxTDR
] = { 0x03, 8 },
163 [SCxSR
] = { 0x04, 8 },
164 [SCxRDR
] = { 0x05, 8 },
165 [SCFCR
] = { 0x06, 8 },
166 [SCFDR
] = { 0x07, 16 },
167 [SCTFDR
] = sci_reg_invalid
,
168 [SCRFDR
] = sci_reg_invalid
,
169 [SCSPTR
] = sci_reg_invalid
,
170 [SCLSR
] = sci_reg_invalid
,
174 * Common SCIFA definitions.
176 [SCIx_SCIFA_REGTYPE
] = {
177 [SCSMR
] = { 0x00, 16 },
178 [SCBRR
] = { 0x04, 8 },
179 [SCSCR
] = { 0x08, 16 },
180 [SCxTDR
] = { 0x20, 8 },
181 [SCxSR
] = { 0x14, 16 },
182 [SCxRDR
] = { 0x24, 8 },
183 [SCFCR
] = { 0x18, 16 },
184 [SCFDR
] = { 0x1c, 16 },
185 [SCTFDR
] = sci_reg_invalid
,
186 [SCRFDR
] = sci_reg_invalid
,
187 [SCSPTR
] = sci_reg_invalid
,
188 [SCLSR
] = sci_reg_invalid
,
192 * Common SCIFB definitions.
194 [SCIx_SCIFB_REGTYPE
] = {
195 [SCSMR
] = { 0x00, 16 },
196 [SCBRR
] = { 0x04, 8 },
197 [SCSCR
] = { 0x08, 16 },
198 [SCxTDR
] = { 0x40, 8 },
199 [SCxSR
] = { 0x14, 16 },
200 [SCxRDR
] = { 0x60, 8 },
201 [SCFCR
] = { 0x18, 16 },
202 [SCFDR
] = { 0x1c, 16 },
203 [SCTFDR
] = sci_reg_invalid
,
204 [SCRFDR
] = sci_reg_invalid
,
205 [SCSPTR
] = sci_reg_invalid
,
206 [SCLSR
] = sci_reg_invalid
,
210 * Common SH-2(A) SCIF definitions for ports with FIFO data
213 [SCIx_SH2_SCIF_FIFODATA_REGTYPE
] = {
214 [SCSMR
] = { 0x00, 16 },
215 [SCBRR
] = { 0x04, 8 },
216 [SCSCR
] = { 0x08, 16 },
217 [SCxTDR
] = { 0x0c, 8 },
218 [SCxSR
] = { 0x10, 16 },
219 [SCxRDR
] = { 0x14, 8 },
220 [SCFCR
] = { 0x18, 16 },
221 [SCFDR
] = { 0x1c, 16 },
222 [SCTFDR
] = sci_reg_invalid
,
223 [SCRFDR
] = sci_reg_invalid
,
224 [SCSPTR
] = { 0x20, 16 },
225 [SCLSR
] = { 0x24, 16 },
229 * Common SH-3 SCIF definitions.
231 [SCIx_SH3_SCIF_REGTYPE
] = {
232 [SCSMR
] = { 0x00, 8 },
233 [SCBRR
] = { 0x02, 8 },
234 [SCSCR
] = { 0x04, 8 },
235 [SCxTDR
] = { 0x06, 8 },
236 [SCxSR
] = { 0x08, 16 },
237 [SCxRDR
] = { 0x0a, 8 },
238 [SCFCR
] = { 0x0c, 8 },
239 [SCFDR
] = { 0x0e, 16 },
240 [SCTFDR
] = sci_reg_invalid
,
241 [SCRFDR
] = sci_reg_invalid
,
242 [SCSPTR
] = sci_reg_invalid
,
243 [SCLSR
] = sci_reg_invalid
,
247 * Common SH-4(A) SCIF(B) definitions.
249 [SCIx_SH4_SCIF_REGTYPE
] = {
250 [SCSMR
] = { 0x00, 16 },
251 [SCBRR
] = { 0x04, 8 },
252 [SCSCR
] = { 0x08, 16 },
253 [SCxTDR
] = { 0x0c, 8 },
254 [SCxSR
] = { 0x10, 16 },
255 [SCxRDR
] = { 0x14, 8 },
256 [SCFCR
] = { 0x18, 16 },
257 [SCFDR
] = { 0x1c, 16 },
258 [SCTFDR
] = sci_reg_invalid
,
259 [SCRFDR
] = sci_reg_invalid
,
260 [SCSPTR
] = { 0x20, 16 },
261 [SCLSR
] = { 0x24, 16 },
265 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
268 [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE
] = {
269 [SCSMR
] = { 0x00, 16 },
270 [SCBRR
] = { 0x04, 8 },
271 [SCSCR
] = { 0x08, 16 },
272 [SCxTDR
] = { 0x0c, 8 },
273 [SCxSR
] = { 0x10, 16 },
274 [SCxRDR
] = { 0x14, 8 },
275 [SCFCR
] = { 0x18, 16 },
276 [SCFDR
] = { 0x1c, 16 },
277 [SCTFDR
] = sci_reg_invalid
,
278 [SCRFDR
] = sci_reg_invalid
,
279 [SCSPTR
] = sci_reg_invalid
,
280 [SCLSR
] = { 0x24, 16 },
284 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
287 [SCIx_SH4_SCIF_FIFODATA_REGTYPE
] = {
288 [SCSMR
] = { 0x00, 16 },
289 [SCBRR
] = { 0x04, 8 },
290 [SCSCR
] = { 0x08, 16 },
291 [SCxTDR
] = { 0x0c, 8 },
292 [SCxSR
] = { 0x10, 16 },
293 [SCxRDR
] = { 0x14, 8 },
294 [SCFCR
] = { 0x18, 16 },
295 [SCFDR
] = { 0x1c, 16 },
296 [SCTFDR
] = { 0x1c, 16 }, /* aliased to SCFDR */
297 [SCRFDR
] = { 0x20, 16 },
298 [SCSPTR
] = { 0x24, 16 },
299 [SCLSR
] = { 0x28, 16 },
303 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
306 [SCIx_SH7705_SCIF_REGTYPE
] = {
307 [SCSMR
] = { 0x00, 16 },
308 [SCBRR
] = { 0x04, 8 },
309 [SCSCR
] = { 0x08, 16 },
310 [SCxTDR
] = { 0x20, 8 },
311 [SCxSR
] = { 0x14, 16 },
312 [SCxRDR
] = { 0x24, 8 },
313 [SCFCR
] = { 0x18, 16 },
314 [SCFDR
] = { 0x1c, 16 },
315 [SCTFDR
] = sci_reg_invalid
,
316 [SCRFDR
] = sci_reg_invalid
,
317 [SCSPTR
] = sci_reg_invalid
,
318 [SCLSR
] = sci_reg_invalid
,
/*
 * Look up the register descriptor for a register enum on a given port.
 * "offset" here is a register index (SCSMR, SCBRR, ...), not a byte
 * offset; the returned plat_sci_reg carries the real offset and size.
 * Fix: parenthesize the macro arguments so an expression argument
 * (e.g. "a + b") cannot change the expansion's meaning.
 */
#define sci_getreg(up, offset) (sci_regmap[to_sci_port((up))->cfg->regtype] + (offset))
325 * The "offset" here is rather misleading, in that it refers to an enum
326 * value relative to the port mapping rather than the fixed offset
327 * itself, which needs to be manually retrieved from the platform's
328 * register map for the given port.
330 static unsigned int sci_serial_in(struct uart_port
*p
, int offset
)
332 struct plat_sci_reg
*reg
= sci_getreg(p
, offset
);
335 return ioread8(p
->membase
+ (reg
->offset
<< p
->regshift
));
336 else if (reg
->size
== 16)
337 return ioread16(p
->membase
+ (reg
->offset
<< p
->regshift
));
339 WARN(1, "Invalid register access\n");
344 static void sci_serial_out(struct uart_port
*p
, int offset
, int value
)
346 struct plat_sci_reg
*reg
= sci_getreg(p
, offset
);
349 iowrite8(value
, p
->membase
+ (reg
->offset
<< p
->regshift
));
350 else if (reg
->size
== 16)
351 iowrite16(value
, p
->membase
+ (reg
->offset
<< p
->regshift
));
353 WARN(1, "Invalid register access\n");
356 #define sci_in(up, offset) (up->serial_in(up, offset))
357 #define sci_out(up, offset, value) (up->serial_out(up, offset, value))
359 static int sci_probe_regmap(struct plat_sci_port
*cfg
)
363 cfg
->regtype
= SCIx_SCI_REGTYPE
;
366 cfg
->regtype
= SCIx_IRDA_REGTYPE
;
369 cfg
->regtype
= SCIx_SCIFA_REGTYPE
;
372 cfg
->regtype
= SCIx_SCIFB_REGTYPE
;
376 * The SH-4 is a bit of a misnomer here, although that's
377 * where this particular port layout originated. This
378 * configuration (or some slight variation thereof)
379 * remains the dominant model for all SCIFs.
381 cfg
->regtype
= SCIx_SH4_SCIF_REGTYPE
;
384 printk(KERN_ERR
"Can't probe register map for given port\n");
391 static void sci_port_enable(struct sci_port
*sci_port
)
393 if (!sci_port
->port
.dev
)
396 pm_runtime_get_sync(sci_port
->port
.dev
);
398 clk_enable(sci_port
->iclk
);
399 sci_port
->port
.uartclk
= clk_get_rate(sci_port
->iclk
);
400 clk_enable(sci_port
->fclk
);
403 static void sci_port_disable(struct sci_port
*sci_port
)
405 if (!sci_port
->port
.dev
)
408 clk_disable(sci_port
->fclk
);
409 clk_disable(sci_port
->iclk
);
411 pm_runtime_put_sync(sci_port
->port
.dev
);
414 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
416 #ifdef CONFIG_CONSOLE_POLL
417 static int sci_poll_get_char(struct uart_port
*port
)
419 unsigned short status
;
423 status
= sci_in(port
, SCxSR
);
424 if (status
& SCxSR_ERRORS(port
)) {
425 sci_out(port
, SCxSR
, SCxSR_ERROR_CLEAR(port
));
431 if (!(status
& SCxSR_RDxF(port
)))
434 c
= sci_in(port
, SCxRDR
);
438 sci_out(port
, SCxSR
, SCxSR_RDxF_CLEAR(port
));
444 static void sci_poll_put_char(struct uart_port
*port
, unsigned char c
)
446 unsigned short status
;
449 status
= sci_in(port
, SCxSR
);
450 } while (!(status
& SCxSR_TDxE(port
)));
452 sci_out(port
, SCxTDR
, c
);
453 sci_out(port
, SCxSR
, SCxSR_TDxE_CLEAR(port
) & ~SCxSR_TEND(port
));
455 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
457 static void sci_init_pins(struct uart_port
*port
, unsigned int cflag
)
459 struct sci_port
*s
= to_sci_port(port
);
460 struct plat_sci_reg
*reg
= sci_regmap
[s
->cfg
->regtype
] + SCSPTR
;
463 * Use port-specific handler if provided.
465 if (s
->cfg
->ops
&& s
->cfg
->ops
->init_pins
) {
466 s
->cfg
->ops
->init_pins(port
, cflag
);
471 * For the generic path SCSPTR is necessary. Bail out if that's
477 if (!(cflag
& CRTSCTS
))
478 sci_out(port
, SCSPTR
, 0x0080); /* Set RTS = 1 */
481 static int sci_txfill(struct uart_port
*port
)
483 struct plat_sci_reg
*reg
;
485 reg
= sci_getreg(port
, SCTFDR
);
487 return sci_in(port
, SCTFDR
) & 0xff;
489 reg
= sci_getreg(port
, SCFDR
);
491 return sci_in(port
, SCFDR
) >> 8;
493 return !(sci_in(port
, SCxSR
) & SCI_TDRE
);
496 static int sci_txroom(struct uart_port
*port
)
498 return port
->fifosize
- sci_txfill(port
);
501 static int sci_rxfill(struct uart_port
*port
)
503 struct plat_sci_reg
*reg
;
505 reg
= sci_getreg(port
, SCRFDR
);
507 return sci_in(port
, SCRFDR
) & 0xff;
509 reg
= sci_getreg(port
, SCFDR
);
511 return sci_in(port
, SCFDR
) & ((port
->fifosize
<< 1) - 1);
513 return (sci_in(port
, SCxSR
) & SCxSR_RDxF(port
)) != 0;
517 * SCI helper for checking the state of the muxed port/RXD pins.
519 static inline int sci_rxd_in(struct uart_port
*port
)
521 struct sci_port
*s
= to_sci_port(port
);
523 if (s
->cfg
->port_reg
<= 0)
526 return !!__raw_readb(s
->cfg
->port_reg
);
/* ********************************************************************** *
 *                   the interrupt related routines                      *
 * ********************************************************************** */
533 static void sci_transmit_chars(struct uart_port
*port
)
535 struct circ_buf
*xmit
= &port
->state
->xmit
;
536 unsigned int stopped
= uart_tx_stopped(port
);
537 unsigned short status
;
541 status
= sci_in(port
, SCxSR
);
542 if (!(status
& SCxSR_TDxE(port
))) {
543 ctrl
= sci_in(port
, SCSCR
);
544 if (uart_circ_empty(xmit
))
548 sci_out(port
, SCSCR
, ctrl
);
552 count
= sci_txroom(port
);
560 } else if (!uart_circ_empty(xmit
) && !stopped
) {
561 c
= xmit
->buf
[xmit
->tail
];
562 xmit
->tail
= (xmit
->tail
+ 1) & (UART_XMIT_SIZE
- 1);
567 sci_out(port
, SCxTDR
, c
);
570 } while (--count
> 0);
572 sci_out(port
, SCxSR
, SCxSR_TDxE_CLEAR(port
));
574 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
575 uart_write_wakeup(port
);
576 if (uart_circ_empty(xmit
)) {
579 ctrl
= sci_in(port
, SCSCR
);
581 if (port
->type
!= PORT_SCI
) {
582 sci_in(port
, SCxSR
); /* Dummy read */
583 sci_out(port
, SCxSR
, SCxSR_TDxE_CLEAR(port
));
587 sci_out(port
, SCSCR
, ctrl
);
/*
 * On SH3, SCIF may read end-of-break as a space->mark char.
 * STEPFN(c) is true only for c == 0 or c == -1 (all-zero / all-one
 * bytes), the values seen while the line is in a break condition.
 */
#define STEPFN(c)  ({ int _stepfn_c = (c); (((_stepfn_c - 1) | _stepfn_c) == -1); })
594 static void sci_receive_chars(struct uart_port
*port
)
596 struct sci_port
*sci_port
= to_sci_port(port
);
597 struct tty_struct
*tty
= port
->state
->port
.tty
;
598 int i
, count
, copied
= 0;
599 unsigned short status
;
602 status
= sci_in(port
, SCxSR
);
603 if (!(status
& SCxSR_RDxF(port
)))
607 /* Don't copy more bytes than there is room for in the buffer */
608 count
= tty_buffer_request_room(tty
, sci_rxfill(port
));
610 /* If for any reason we can't copy more data, we're done! */
614 if (port
->type
== PORT_SCI
) {
615 char c
= sci_in(port
, SCxRDR
);
616 if (uart_handle_sysrq_char(port
, c
) ||
617 sci_port
->break_flag
)
620 tty_insert_flip_char(tty
, c
, TTY_NORMAL
);
622 for (i
= 0; i
< count
; i
++) {
623 char c
= sci_in(port
, SCxRDR
);
624 status
= sci_in(port
, SCxSR
);
625 #if defined(CONFIG_CPU_SH3)
626 /* Skip "chars" during break */
627 if (sci_port
->break_flag
) {
629 (status
& SCxSR_FER(port
))) {
634 /* Nonzero => end-of-break */
635 dev_dbg(port
->dev
, "debounce<%02x>\n", c
);
636 sci_port
->break_flag
= 0;
643 #endif /* CONFIG_CPU_SH3 */
644 if (uart_handle_sysrq_char(port
, c
)) {
649 /* Store data and status */
650 if (status
& SCxSR_FER(port
)) {
652 dev_notice(port
->dev
, "frame error\n");
653 } else if (status
& SCxSR_PER(port
)) {
655 dev_notice(port
->dev
, "parity error\n");
659 tty_insert_flip_char(tty
, c
, flag
);
663 sci_in(port
, SCxSR
); /* dummy read */
664 sci_out(port
, SCxSR
, SCxSR_RDxF_CLEAR(port
));
667 port
->icount
.rx
+= count
;
671 /* Tell the rest of the system the news. New characters! */
672 tty_flip_buffer_push(tty
);
674 sci_in(port
, SCxSR
); /* dummy read */
675 sci_out(port
, SCxSR
, SCxSR_RDxF_CLEAR(port
));
679 #define SCI_BREAK_JIFFIES (HZ/20)
682 * The sci generates interrupts during the break,
683 * 1 per millisecond or so during the break period, for 9600 baud.
684 * So dont bother disabling interrupts.
685 * But dont want more than 1 break event.
686 * Use a kernel timer to periodically poll the rx line until
687 * the break is finished.
689 static inline void sci_schedule_break_timer(struct sci_port
*port
)
691 mod_timer(&port
->break_timer
, jiffies
+ SCI_BREAK_JIFFIES
);
694 /* Ensure that two consecutive samples find the break over. */
695 static void sci_break_timer(unsigned long data
)
697 struct sci_port
*port
= (struct sci_port
*)data
;
699 sci_port_enable(port
);
701 if (sci_rxd_in(&port
->port
) == 0) {
702 port
->break_flag
= 1;
703 sci_schedule_break_timer(port
);
704 } else if (port
->break_flag
== 1) {
706 port
->break_flag
= 2;
707 sci_schedule_break_timer(port
);
709 port
->break_flag
= 0;
711 sci_port_disable(port
);
714 static int sci_handle_errors(struct uart_port
*port
)
717 unsigned short status
= sci_in(port
, SCxSR
);
718 struct tty_struct
*tty
= port
->state
->port
.tty
;
719 struct sci_port
*s
= to_sci_port(port
);
722 * Handle overruns, if supported.
724 if (s
->cfg
->overrun_bit
!= SCIx_NOT_SUPPORTED
) {
725 if (status
& (1 << s
->cfg
->overrun_bit
)) {
727 if (tty_insert_flip_char(tty
, 0, TTY_OVERRUN
))
730 dev_notice(port
->dev
, "overrun error");
734 if (status
& SCxSR_FER(port
)) {
735 if (sci_rxd_in(port
) == 0) {
736 /* Notify of BREAK */
737 struct sci_port
*sci_port
= to_sci_port(port
);
739 if (!sci_port
->break_flag
) {
740 sci_port
->break_flag
= 1;
741 sci_schedule_break_timer(sci_port
);
743 /* Do sysrq handling. */
744 if (uart_handle_break(port
))
747 dev_dbg(port
->dev
, "BREAK detected\n");
749 if (tty_insert_flip_char(tty
, 0, TTY_BREAK
))
755 if (tty_insert_flip_char(tty
, 0, TTY_FRAME
))
758 dev_notice(port
->dev
, "frame error\n");
762 if (status
& SCxSR_PER(port
)) {
764 if (tty_insert_flip_char(tty
, 0, TTY_PARITY
))
767 dev_notice(port
->dev
, "parity error");
771 tty_flip_buffer_push(tty
);
776 static int sci_handle_fifo_overrun(struct uart_port
*port
)
778 struct tty_struct
*tty
= port
->state
->port
.tty
;
779 struct sci_port
*s
= to_sci_port(port
);
780 struct plat_sci_reg
*reg
;
783 reg
= sci_getreg(port
, SCLSR
);
787 if ((sci_in(port
, SCLSR
) & (1 << s
->cfg
->overrun_bit
))) {
788 sci_out(port
, SCLSR
, 0);
790 tty_insert_flip_char(tty
, 0, TTY_OVERRUN
);
791 tty_flip_buffer_push(tty
);
793 dev_notice(port
->dev
, "overrun error\n");
800 static int sci_handle_breaks(struct uart_port
*port
)
803 unsigned short status
= sci_in(port
, SCxSR
);
804 struct tty_struct
*tty
= port
->state
->port
.tty
;
805 struct sci_port
*s
= to_sci_port(port
);
807 if (uart_handle_break(port
))
810 if (!s
->break_flag
&& status
& SCxSR_BRK(port
)) {
811 #if defined(CONFIG_CPU_SH3)
815 /* Notify of BREAK */
816 if (tty_insert_flip_char(tty
, 0, TTY_BREAK
))
819 dev_dbg(port
->dev
, "BREAK detected\n");
823 tty_flip_buffer_push(tty
);
825 copied
+= sci_handle_fifo_overrun(port
);
830 static irqreturn_t
sci_rx_interrupt(int irq
, void *ptr
)
832 #ifdef CONFIG_SERIAL_SH_SCI_DMA
833 struct uart_port
*port
= ptr
;
834 struct sci_port
*s
= to_sci_port(port
);
837 u16 scr
= sci_in(port
, SCSCR
);
838 u16 ssr
= sci_in(port
, SCxSR
);
840 /* Disable future Rx interrupts */
841 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
842 disable_irq_nosync(irq
);
847 sci_out(port
, SCSCR
, scr
);
848 /* Clear current interrupt */
849 sci_out(port
, SCxSR
, ssr
& ~(1 | SCxSR_RDxF(port
)));
850 dev_dbg(port
->dev
, "Rx IRQ %lu: setup t-out in %u jiffies\n",
851 jiffies
, s
->rx_timeout
);
852 mod_timer(&s
->rx_timer
, jiffies
+ s
->rx_timeout
);
858 /* I think sci_receive_chars has to be called irrespective
859 * of whether the I_IXOFF is set, otherwise, how is the interrupt
862 sci_receive_chars(ptr
);
867 static irqreturn_t
sci_tx_interrupt(int irq
, void *ptr
)
869 struct uart_port
*port
= ptr
;
872 spin_lock_irqsave(&port
->lock
, flags
);
873 sci_transmit_chars(port
);
874 spin_unlock_irqrestore(&port
->lock
, flags
);
879 static irqreturn_t
sci_er_interrupt(int irq
, void *ptr
)
881 struct uart_port
*port
= ptr
;
884 if (port
->type
== PORT_SCI
) {
885 if (sci_handle_errors(port
)) {
886 /* discard character in rx buffer */
888 sci_out(port
, SCxSR
, SCxSR_RDxF_CLEAR(port
));
891 sci_handle_fifo_overrun(port
);
892 sci_rx_interrupt(irq
, ptr
);
895 sci_out(port
, SCxSR
, SCxSR_ERROR_CLEAR(port
));
897 /* Kick the transmission */
898 sci_tx_interrupt(irq
, ptr
);
903 static irqreturn_t
sci_br_interrupt(int irq
, void *ptr
)
905 struct uart_port
*port
= ptr
;
908 sci_handle_breaks(port
);
909 sci_out(port
, SCxSR
, SCxSR_BREAK_CLEAR(port
));
914 static inline unsigned long port_rx_irq_mask(struct uart_port
*port
)
917 * Not all ports (such as SCIFA) will support REIE. Rather than
918 * special-casing the port type, we check the port initialization
919 * IRQ enable mask to see whether the IRQ is desired at all. If
920 * it's unset, it's logically inferred that there's no point in
923 return SCSCR_RIE
| (to_sci_port(port
)->cfg
->scscr
& SCSCR_REIE
);
926 static irqreturn_t
sci_mpxed_interrupt(int irq
, void *ptr
)
928 unsigned short ssr_status
, scr_status
, err_enabled
;
929 struct uart_port
*port
= ptr
;
930 struct sci_port
*s
= to_sci_port(port
);
931 irqreturn_t ret
= IRQ_NONE
;
933 ssr_status
= sci_in(port
, SCxSR
);
934 scr_status
= sci_in(port
, SCSCR
);
935 err_enabled
= scr_status
& port_rx_irq_mask(port
);
938 if ((ssr_status
& SCxSR_TDxE(port
)) && (scr_status
& SCSCR_TIE
) &&
940 ret
= sci_tx_interrupt(irq
, ptr
);
943 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
946 if (((ssr_status
& SCxSR_RDxF(port
)) || s
->chan_rx
) &&
947 (scr_status
& SCSCR_RIE
))
948 ret
= sci_rx_interrupt(irq
, ptr
);
950 /* Error Interrupt */
951 if ((ssr_status
& SCxSR_ERRORS(port
)) && err_enabled
)
952 ret
= sci_er_interrupt(irq
, ptr
);
954 /* Break Interrupt */
955 if ((ssr_status
& SCxSR_BRK(port
)) && err_enabled
)
956 ret
= sci_br_interrupt(irq
, ptr
);
/*
 * Here we define a transition notifier so that we can update all of our
 * ports' baud rate when the peripheral clock changes.
 */
965 static int sci_notifier(struct notifier_block
*self
,
966 unsigned long phase
, void *p
)
968 struct sci_port
*sci_port
;
971 sci_port
= container_of(self
, struct sci_port
, freq_transition
);
973 if ((phase
== CPUFREQ_POSTCHANGE
) ||
974 (phase
== CPUFREQ_RESUMECHANGE
)) {
975 struct uart_port
*port
= &sci_port
->port
;
977 spin_lock_irqsave(&port
->lock
, flags
);
978 port
->uartclk
= clk_get_rate(sci_port
->iclk
);
979 spin_unlock_irqrestore(&port
->lock
, flags
);
985 static struct sci_irq_desc
{
987 irq_handler_t handler
;
990 * Split out handlers, the default case.
994 .handler
= sci_er_interrupt
,
999 .handler
= sci_rx_interrupt
,
1004 .handler
= sci_tx_interrupt
,
1009 .handler
= sci_br_interrupt
,
1013 * Special muxed handler.
1017 .handler
= sci_mpxed_interrupt
,
1021 static int sci_request_irq(struct sci_port
*port
)
1023 struct uart_port
*up
= &port
->port
;
1026 for (i
= j
= 0; i
< SCIx_NR_IRQS
; i
++, j
++) {
1027 struct sci_irq_desc
*desc
;
1030 if (SCIx_IRQ_IS_MUXED(port
)) {
1034 irq
= port
->cfg
->irqs
[i
];
1036 desc
= sci_irq_desc
+ i
;
1037 port
->irqstr
[j
] = kasprintf(GFP_KERNEL
, "%s:%s",
1038 dev_name(up
->dev
), desc
->desc
);
1039 if (!port
->irqstr
[j
]) {
1040 dev_err(up
->dev
, "Failed to allocate %s IRQ string\n",
1045 ret
= request_irq(irq
, desc
->handler
, up
->irqflags
,
1046 port
->irqstr
[j
], port
);
1047 if (unlikely(ret
)) {
1048 dev_err(up
->dev
, "Can't allocate %s IRQ\n", desc
->desc
);
1057 free_irq(port
->cfg
->irqs
[i
], port
);
1061 kfree(port
->irqstr
[j
]);
1066 static void sci_free_irq(struct sci_port
*port
)
1071 * Intentionally in reverse order so we iterate over the muxed
1074 for (i
= 0; i
< SCIx_NR_IRQS
; i
++) {
1075 free_irq(port
->cfg
->irqs
[i
], port
);
1076 kfree(port
->irqstr
[i
]);
1078 if (SCIx_IRQ_IS_MUXED(port
)) {
1079 /* If there's only one IRQ, we're done. */
1085 static unsigned int sci_tx_empty(struct uart_port
*port
)
1087 unsigned short status
= sci_in(port
, SCxSR
);
1088 unsigned short in_tx_fifo
= sci_txfill(port
);
1090 return (status
& SCxSR_TEND(port
)) && !in_tx_fifo
? TIOCSER_TEMT
: 0;
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* This routine is used for seting signals of: DTR, DCD, CTS/RTS */
	/* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
	/* If you have signals for DTR and DCD, please implement here. */
}
1100 static unsigned int sci_get_mctrl(struct uart_port
*port
)
1102 /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
1105 return TIOCM_DTR
| TIOCM_RTS
| TIOCM_CTS
| TIOCM_DSR
;
1108 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1109 static void sci_dma_tx_complete(void *arg
)
1111 struct sci_port
*s
= arg
;
1112 struct uart_port
*port
= &s
->port
;
1113 struct circ_buf
*xmit
= &port
->state
->xmit
;
1114 unsigned long flags
;
1116 dev_dbg(port
->dev
, "%s(%d)\n", __func__
, port
->line
);
1118 spin_lock_irqsave(&port
->lock
, flags
);
1120 xmit
->tail
+= sg_dma_len(&s
->sg_tx
);
1121 xmit
->tail
&= UART_XMIT_SIZE
- 1;
1123 port
->icount
.tx
+= sg_dma_len(&s
->sg_tx
);
1125 async_tx_ack(s
->desc_tx
);
1126 s
->cookie_tx
= -EINVAL
;
1129 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
1130 uart_write_wakeup(port
);
1132 if (!uart_circ_empty(xmit
)) {
1133 schedule_work(&s
->work_tx
);
1134 } else if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
1135 u16 ctrl
= sci_in(port
, SCSCR
);
1136 sci_out(port
, SCSCR
, ctrl
& ~SCSCR_TIE
);
1139 spin_unlock_irqrestore(&port
->lock
, flags
);
1142 /* Locking: called with port lock held */
1143 static int sci_dma_rx_push(struct sci_port
*s
, struct tty_struct
*tty
,
1146 struct uart_port
*port
= &s
->port
;
1147 int i
, active
, room
;
1149 room
= tty_buffer_request_room(tty
, count
);
1151 if (s
->active_rx
== s
->cookie_rx
[0]) {
1153 } else if (s
->active_rx
== s
->cookie_rx
[1]) {
1156 dev_err(port
->dev
, "cookie %d not found!\n", s
->active_rx
);
1161 dev_warn(port
->dev
, "Rx overrun: dropping %u bytes\n",
1166 for (i
= 0; i
< room
; i
++)
1167 tty_insert_flip_char(tty
, ((u8
*)sg_virt(&s
->sg_rx
[active
]))[i
],
1170 port
->icount
.rx
+= room
;
1175 static void sci_dma_rx_complete(void *arg
)
1177 struct sci_port
*s
= arg
;
1178 struct uart_port
*port
= &s
->port
;
1179 struct tty_struct
*tty
= port
->state
->port
.tty
;
1180 unsigned long flags
;
1183 dev_dbg(port
->dev
, "%s(%d) active #%d\n", __func__
, port
->line
, s
->active_rx
);
1185 spin_lock_irqsave(&port
->lock
, flags
);
1187 count
= sci_dma_rx_push(s
, tty
, s
->buf_len_rx
);
1189 mod_timer(&s
->rx_timer
, jiffies
+ s
->rx_timeout
);
1191 spin_unlock_irqrestore(&port
->lock
, flags
);
1194 tty_flip_buffer_push(tty
);
1196 schedule_work(&s
->work_rx
);
1199 static void sci_rx_dma_release(struct sci_port
*s
, bool enable_pio
)
1201 struct dma_chan
*chan
= s
->chan_rx
;
1202 struct uart_port
*port
= &s
->port
;
1205 s
->cookie_rx
[0] = s
->cookie_rx
[1] = -EINVAL
;
1206 dma_release_channel(chan
);
1207 if (sg_dma_address(&s
->sg_rx
[0]))
1208 dma_free_coherent(port
->dev
, s
->buf_len_rx
* 2,
1209 sg_virt(&s
->sg_rx
[0]), sg_dma_address(&s
->sg_rx
[0]));
1214 static void sci_tx_dma_release(struct sci_port
*s
, bool enable_pio
)
1216 struct dma_chan
*chan
= s
->chan_tx
;
1217 struct uart_port
*port
= &s
->port
;
1220 s
->cookie_tx
= -EINVAL
;
1221 dma_release_channel(chan
);
1226 static void sci_submit_rx(struct sci_port
*s
)
1228 struct dma_chan
*chan
= s
->chan_rx
;
1231 for (i
= 0; i
< 2; i
++) {
1232 struct scatterlist
*sg
= &s
->sg_rx
[i
];
1233 struct dma_async_tx_descriptor
*desc
;
1235 desc
= chan
->device
->device_prep_slave_sg(chan
,
1236 sg
, 1, DMA_FROM_DEVICE
, DMA_PREP_INTERRUPT
);
1239 s
->desc_rx
[i
] = desc
;
1240 desc
->callback
= sci_dma_rx_complete
;
1241 desc
->callback_param
= s
;
1242 s
->cookie_rx
[i
] = desc
->tx_submit(desc
);
1245 if (!desc
|| s
->cookie_rx
[i
] < 0) {
1247 async_tx_ack(s
->desc_rx
[0]);
1248 s
->cookie_rx
[0] = -EINVAL
;
1252 s
->cookie_rx
[i
] = -EINVAL
;
1254 dev_warn(s
->port
.dev
,
1255 "failed to re-start DMA, using PIO\n");
1256 sci_rx_dma_release(s
, true);
1259 dev_dbg(s
->port
.dev
, "%s(): cookie %d to #%d\n", __func__
,
1260 s
->cookie_rx
[i
], i
);
1263 s
->active_rx
= s
->cookie_rx
[0];
1265 dma_async_issue_pending(chan
);
1268 static void work_fn_rx(struct work_struct
*work
)
1270 struct sci_port
*s
= container_of(work
, struct sci_port
, work_rx
);
1271 struct uart_port
*port
= &s
->port
;
1272 struct dma_async_tx_descriptor
*desc
;
1275 if (s
->active_rx
== s
->cookie_rx
[0]) {
1277 } else if (s
->active_rx
== s
->cookie_rx
[1]) {
1280 dev_err(port
->dev
, "cookie %d not found!\n", s
->active_rx
);
1283 desc
= s
->desc_rx
[new];
1285 if (dma_async_is_tx_complete(s
->chan_rx
, s
->active_rx
, NULL
, NULL
) !=
1287 /* Handle incomplete DMA receive */
1288 struct tty_struct
*tty
= port
->state
->port
.tty
;
1289 struct dma_chan
*chan
= s
->chan_rx
;
1290 struct sh_desc
*sh_desc
= container_of(desc
, struct sh_desc
,
1292 unsigned long flags
;
1295 chan
->device
->device_control(chan
, DMA_TERMINATE_ALL
, 0);
1296 dev_dbg(port
->dev
, "Read %u bytes with cookie %d\n",
1297 sh_desc
->partial
, sh_desc
->cookie
);
1299 spin_lock_irqsave(&port
->lock
, flags
);
1300 count
= sci_dma_rx_push(s
, tty
, sh_desc
->partial
);
1301 spin_unlock_irqrestore(&port
->lock
, flags
);
1304 tty_flip_buffer_push(tty
);
1311 s
->cookie_rx
[new] = desc
->tx_submit(desc
);
1312 if (s
->cookie_rx
[new] < 0) {
1313 dev_warn(port
->dev
, "Failed submitting Rx DMA descriptor\n");
1314 sci_rx_dma_release(s
, true);
1318 s
->active_rx
= s
->cookie_rx
[!new];
1320 dev_dbg(port
->dev
, "%s: cookie %d #%d, new active #%d\n", __func__
,
1321 s
->cookie_rx
[new], new, s
->active_rx
);
1324 static void work_fn_tx(struct work_struct
*work
)
1326 struct sci_port
*s
= container_of(work
, struct sci_port
, work_tx
);
1327 struct dma_async_tx_descriptor
*desc
;
1328 struct dma_chan
*chan
= s
->chan_tx
;
1329 struct uart_port
*port
= &s
->port
;
1330 struct circ_buf
*xmit
= &port
->state
->xmit
;
1331 struct scatterlist
*sg
= &s
->sg_tx
;
1335 * Port xmit buffer is already mapped, and it is one page... Just adjust
1336 * offsets and lengths. Since it is a circular buffer, we have to
1337 * transmit till the end, and then the rest. Take the port lock to get a
1338 * consistent xmit buffer state.
1340 spin_lock_irq(&port
->lock
);
1341 sg
->offset
= xmit
->tail
& (UART_XMIT_SIZE
- 1);
1342 sg_dma_address(sg
) = (sg_dma_address(sg
) & ~(UART_XMIT_SIZE
- 1)) +
1344 sg_dma_len(sg
) = min((int)CIRC_CNT(xmit
->head
, xmit
->tail
, UART_XMIT_SIZE
),
1345 CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
, UART_XMIT_SIZE
));
1346 spin_unlock_irq(&port
->lock
);
1348 BUG_ON(!sg_dma_len(sg
));
1350 desc
= chan
->device
->device_prep_slave_sg(chan
,
1351 sg
, s
->sg_len_tx
, DMA_TO_DEVICE
,
1352 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
1355 sci_tx_dma_release(s
, true);
1359 dma_sync_sg_for_device(port
->dev
, sg
, 1, DMA_TO_DEVICE
);
1361 spin_lock_irq(&port
->lock
);
1363 desc
->callback
= sci_dma_tx_complete
;
1364 desc
->callback_param
= s
;
1365 spin_unlock_irq(&port
->lock
);
1366 s
->cookie_tx
= desc
->tx_submit(desc
);
1367 if (s
->cookie_tx
< 0) {
1368 dev_warn(port
->dev
, "Failed submitting Tx DMA descriptor\n");
1370 sci_tx_dma_release(s
, true);
1374 dev_dbg(port
->dev
, "%s: %p: %d...%d, cookie %d\n", __func__
,
1375 xmit
->buf
, xmit
->tail
, xmit
->head
, s
->cookie_tx
);
1377 dma_async_issue_pending(chan
);
1381 static void sci_start_tx(struct uart_port
*port
)
1383 struct sci_port
*s
= to_sci_port(port
);
1384 unsigned short ctrl
;
1386 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1387 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
1388 u16
new, scr
= sci_in(port
, SCSCR
);
1392 new = scr
& ~0x8000;
1394 sci_out(port
, SCSCR
, new);
1397 if (s
->chan_tx
&& !uart_circ_empty(&s
->port
.state
->xmit
) &&
1399 schedule_work(&s
->work_tx
);
1402 if (!s
->chan_tx
|| port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
1403 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
1404 ctrl
= sci_in(port
, SCSCR
);
1405 sci_out(port
, SCSCR
, ctrl
| SCSCR_TIE
);
1409 static void sci_stop_tx(struct uart_port
*port
)
1411 unsigned short ctrl
;
1413 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1414 ctrl
= sci_in(port
, SCSCR
);
1416 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
)
1421 sci_out(port
, SCSCR
, ctrl
);
1424 static void sci_start_rx(struct uart_port
*port
)
1426 unsigned short ctrl
;
1428 ctrl
= sci_in(port
, SCSCR
) | port_rx_irq_mask(port
);
1430 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
)
1433 sci_out(port
, SCSCR
, ctrl
);
1436 static void sci_stop_rx(struct uart_port
*port
)
1438 unsigned short ctrl
;
1440 ctrl
= sci_in(port
, SCSCR
);
1442 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
)
1445 ctrl
&= ~port_rx_irq_mask(port
);
1447 sci_out(port
, SCSCR
, ctrl
);
/* Modem-status interrupts are not implemented for this hardware. */
static void sci_enable_ms(struct uart_port *port)
{
	/* Nothing here yet .. */
}
/* Break signalling is not implemented for this hardware. */
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	/* Nothing here yet .. */
}
1460 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1461 static bool filter(struct dma_chan
*chan
, void *slave
)
1463 struct sh_dmae_slave
*param
= slave
;
1465 dev_dbg(chan
->device
->dev
, "%s: slave ID %d\n", __func__
,
1468 chan
->private = param
;
1472 static void rx_timer_fn(unsigned long arg
)
1474 struct sci_port
*s
= (struct sci_port
*)arg
;
1475 struct uart_port
*port
= &s
->port
;
1476 u16 scr
= sci_in(port
, SCSCR
);
1478 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
) {
1480 enable_irq(s
->cfg
->irqs
[1]);
1482 sci_out(port
, SCSCR
, scr
| SCSCR_RIE
);
1483 dev_dbg(port
->dev
, "DMA Rx timed out\n");
1484 schedule_work(&s
->work_rx
);
1487 static void sci_request_dma(struct uart_port
*port
)
1489 struct sci_port
*s
= to_sci_port(port
);
1490 struct sh_dmae_slave
*param
;
1491 struct dma_chan
*chan
;
1492 dma_cap_mask_t mask
;
1495 dev_dbg(port
->dev
, "%s: port %d\n", __func__
,
1498 if (s
->cfg
->dma_slave_tx
<= 0 || s
->cfg
->dma_slave_rx
<= 0)
1502 dma_cap_set(DMA_SLAVE
, mask
);
1504 param
= &s
->param_tx
;
1506 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
1507 param
->slave_id
= s
->cfg
->dma_slave_tx
;
1509 s
->cookie_tx
= -EINVAL
;
1510 chan
= dma_request_channel(mask
, filter
, param
);
1511 dev_dbg(port
->dev
, "%s: TX: got channel %p\n", __func__
, chan
);
1514 sg_init_table(&s
->sg_tx
, 1);
1515 /* UART circular tx buffer is an aligned page. */
1516 BUG_ON((int)port
->state
->xmit
.buf
& ~PAGE_MASK
);
1517 sg_set_page(&s
->sg_tx
, virt_to_page(port
->state
->xmit
.buf
),
1518 UART_XMIT_SIZE
, (int)port
->state
->xmit
.buf
& ~PAGE_MASK
);
1519 nent
= dma_map_sg(port
->dev
, &s
->sg_tx
, 1, DMA_TO_DEVICE
);
1521 sci_tx_dma_release(s
, false);
1523 dev_dbg(port
->dev
, "%s: mapped %d@%p to %x\n", __func__
,
1524 sg_dma_len(&s
->sg_tx
),
1525 port
->state
->xmit
.buf
, sg_dma_address(&s
->sg_tx
));
1527 s
->sg_len_tx
= nent
;
1529 INIT_WORK(&s
->work_tx
, work_fn_tx
);
1532 param
= &s
->param_rx
;
1534 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
1535 param
->slave_id
= s
->cfg
->dma_slave_rx
;
1537 chan
= dma_request_channel(mask
, filter
, param
);
1538 dev_dbg(port
->dev
, "%s: RX: got channel %p\n", __func__
, chan
);
1546 s
->buf_len_rx
= 2 * max(16, (int)port
->fifosize
);
1547 buf
[0] = dma_alloc_coherent(port
->dev
, s
->buf_len_rx
* 2,
1548 &dma
[0], GFP_KERNEL
);
1552 "failed to allocate dma buffer, using PIO\n");
1553 sci_rx_dma_release(s
, true);
1557 buf
[1] = buf
[0] + s
->buf_len_rx
;
1558 dma
[1] = dma
[0] + s
->buf_len_rx
;
1560 for (i
= 0; i
< 2; i
++) {
1561 struct scatterlist
*sg
= &s
->sg_rx
[i
];
1563 sg_init_table(sg
, 1);
1564 sg_set_page(sg
, virt_to_page(buf
[i
]), s
->buf_len_rx
,
1565 (int)buf
[i
] & ~PAGE_MASK
);
1566 sg_dma_address(sg
) = dma
[i
];
1569 INIT_WORK(&s
->work_rx
, work_fn_rx
);
1570 setup_timer(&s
->rx_timer
, rx_timer_fn
, (unsigned long)s
);
1576 static void sci_free_dma(struct uart_port
*port
)
1578 struct sci_port
*s
= to_sci_port(port
);
1581 sci_tx_dma_release(s
, false);
1583 sci_rx_dma_release(s
, false);
1586 static inline void sci_request_dma(struct uart_port
*port
)
1590 static inline void sci_free_dma(struct uart_port
*port
)
1595 static int sci_startup(struct uart_port
*port
)
1597 struct sci_port
*s
= to_sci_port(port
);
1600 dev_dbg(port
->dev
, "%s(%d)\n", __func__
, port
->line
);
1604 ret
= sci_request_irq(s
);
1605 if (unlikely(ret
< 0))
1608 sci_request_dma(port
);
1616 static void sci_shutdown(struct uart_port
*port
)
1618 struct sci_port
*s
= to_sci_port(port
);
1620 dev_dbg(port
->dev
, "%s(%d)\n", __func__
, port
->line
);
1628 sci_port_disable(s
);
1631 static unsigned int sci_scbrr_calc(unsigned int algo_id
, unsigned int bps
,
1636 return ((freq
+ 16 * bps
) / (16 * bps
) - 1);
1638 return ((freq
+ 16 * bps
) / (32 * bps
) - 1);
1640 return (((freq
* 2) + 16 * bps
) / (16 * bps
) - 1);
1642 return (((freq
* 2) + 16 * bps
) / (32 * bps
) - 1);
1644 return (((freq
* 1000 / 32) / bps
) - 1);
1647 /* Warn, but use a safe default */
1650 return ((freq
+ 16 * bps
) / (32 * bps
) - 1);
1653 static void sci_reset(struct uart_port
*port
)
1655 unsigned int status
;
1658 status
= sci_in(port
, SCxSR
);
1659 } while (!(status
& SCxSR_TEND(port
)));
1661 sci_out(port
, SCSCR
, 0x00); /* TE=0, RE=0, CKE1=0 */
1663 if (port
->type
!= PORT_SCI
)
1664 sci_out(port
, SCFCR
, SCFCR_RFRST
| SCFCR_TFRST
);
1667 static void sci_set_termios(struct uart_port
*port
, struct ktermios
*termios
,
1668 struct ktermios
*old
)
1670 struct sci_port
*s
= to_sci_port(port
);
1671 unsigned int baud
, smr_val
, max_baud
;
1676 * earlyprintk comes here early on with port->uartclk set to zero.
1677 * the clock framework is not up and running at this point so here
1678 * we assume that 115200 is the maximum baud rate. please note that
1679 * the baud rate is not programmed during earlyprintk - it is assumed
1680 * that the previous boot loader has enabled required clocks and
1681 * setup the baud rate generator hardware for us already.
1683 max_baud
= port
->uartclk
? port
->uartclk
/ 16 : 115200;
1685 baud
= uart_get_baud_rate(port
, termios
, old
, 0, max_baud
);
1686 if (likely(baud
&& port
->uartclk
))
1687 t
= sci_scbrr_calc(s
->cfg
->scbrr_algo_id
, baud
, port
->uartclk
);
1693 smr_val
= sci_in(port
, SCSMR
) & 3;
1695 if ((termios
->c_cflag
& CSIZE
) == CS7
)
1697 if (termios
->c_cflag
& PARENB
)
1699 if (termios
->c_cflag
& PARODD
)
1701 if (termios
->c_cflag
& CSTOPB
)
1704 uart_update_timeout(port
, termios
->c_cflag
, baud
);
1706 sci_out(port
, SCSMR
, smr_val
);
1708 dev_dbg(port
->dev
, "%s: SMR %x, t %x, SCSCR %x\n", __func__
, smr_val
, t
,
1713 sci_out(port
, SCSMR
, (sci_in(port
, SCSMR
) & ~3) | 1);
1716 sci_out(port
, SCSMR
, sci_in(port
, SCSMR
) & ~3);
1718 sci_out(port
, SCBRR
, t
);
1719 udelay((1000000+(baud
-1)) / baud
); /* Wait one bit interval */
1722 sci_init_pins(port
, termios
->c_cflag
);
1723 sci_out(port
, SCFCR
, scfcr
| ((termios
->c_cflag
& CRTSCTS
) ? SCFCR_MCE
: 0));
1725 sci_out(port
, SCSCR
, s
->cfg
->scscr
);
1727 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1729 * Calculate delay for 1.5 DMA buffers: see
1730 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
1731 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
1732 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
1733 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
1734 * sizes), but it has been found out experimentally, that this is not
1735 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
1736 * as a minimum seem to work perfectly.
1739 s
->rx_timeout
= (port
->timeout
- HZ
/ 50) * s
->buf_len_rx
* 3 /
1742 "DMA Rx t-out %ums, tty t-out %u jiffies\n",
1743 s
->rx_timeout
* 1000 / HZ
, port
->timeout
);
1744 if (s
->rx_timeout
< msecs_to_jiffies(20))
1745 s
->rx_timeout
= msecs_to_jiffies(20);
1749 if ((termios
->c_cflag
& CREAD
) != 0)
1752 sci_port_disable(s
);
1755 static const char *sci_type(struct uart_port
*port
)
1757 switch (port
->type
) {
static inline unsigned long sci_port_size(struct uart_port *port)
{
	/*
	 * Pick an arbitrary size that encapsulates all of the base
	 * registers by default. This can be optimized later, or derived
	 * from platform resource data at such a time that ports begin to
	 * behave more erratically.
	 */
	return 64;
}
1784 static int sci_remap_port(struct uart_port
*port
)
1786 unsigned long size
= sci_port_size(port
);
1789 * Nothing to do if there's already an established membase.
1794 if (port
->flags
& UPF_IOREMAP
) {
1795 port
->membase
= ioremap_nocache(port
->mapbase
, size
);
1796 if (unlikely(!port
->membase
)) {
1797 dev_err(port
->dev
, "can't remap port#%d\n", port
->line
);
1802 * For the simple (and majority of) cases where we don't
1803 * need to do any remapping, just cast the cookie
1806 port
->membase
= (void __iomem
*)port
->mapbase
;
1812 static void sci_release_port(struct uart_port
*port
)
1814 if (port
->flags
& UPF_IOREMAP
) {
1815 iounmap(port
->membase
);
1816 port
->membase
= NULL
;
1819 release_mem_region(port
->mapbase
, sci_port_size(port
));
1822 static int sci_request_port(struct uart_port
*port
)
1824 unsigned long size
= sci_port_size(port
);
1825 struct resource
*res
;
1828 res
= request_mem_region(port
->mapbase
, size
, dev_name(port
->dev
));
1829 if (unlikely(res
== NULL
))
1832 ret
= sci_remap_port(port
);
1833 if (unlikely(ret
!= 0)) {
1834 release_resource(res
);
1841 static void sci_config_port(struct uart_port
*port
, int flags
)
1843 if (flags
& UART_CONFIG_TYPE
) {
1844 struct sci_port
*sport
= to_sci_port(port
);
1846 port
->type
= sport
->cfg
->type
;
1847 sci_request_port(port
);
1851 static int sci_verify_port(struct uart_port
*port
, struct serial_struct
*ser
)
1853 struct sci_port
*s
= to_sci_port(port
);
1855 if (ser
->irq
!= s
->cfg
->irqs
[SCIx_TXI_IRQ
] || ser
->irq
> nr_irqs
)
1857 if (ser
->baud_base
< 2400)
1858 /* No paper tape reader for Mitch.. */
1864 static struct uart_ops sci_uart_ops
= {
1865 .tx_empty
= sci_tx_empty
,
1866 .set_mctrl
= sci_set_mctrl
,
1867 .get_mctrl
= sci_get_mctrl
,
1868 .start_tx
= sci_start_tx
,
1869 .stop_tx
= sci_stop_tx
,
1870 .stop_rx
= sci_stop_rx
,
1871 .enable_ms
= sci_enable_ms
,
1872 .break_ctl
= sci_break_ctl
,
1873 .startup
= sci_startup
,
1874 .shutdown
= sci_shutdown
,
1875 .set_termios
= sci_set_termios
,
1877 .release_port
= sci_release_port
,
1878 .request_port
= sci_request_port
,
1879 .config_port
= sci_config_port
,
1880 .verify_port
= sci_verify_port
,
1881 #ifdef CONFIG_CONSOLE_POLL
1882 .poll_get_char
= sci_poll_get_char
,
1883 .poll_put_char
= sci_poll_put_char
,
1887 static int __devinit
sci_init_single(struct platform_device
*dev
,
1888 struct sci_port
*sci_port
,
1890 struct plat_sci_port
*p
)
1892 struct uart_port
*port
= &sci_port
->port
;
1895 port
->ops
= &sci_uart_ops
;
1896 port
->iotype
= UPIO_MEM
;
1901 port
->fifosize
= 256;
1904 port
->fifosize
= 64;
1907 port
->fifosize
= 16;
1914 if (p
->regtype
== SCIx_PROBE_REGTYPE
) {
1915 ret
= sci_probe_regmap(p
);
1921 sci_port
->iclk
= clk_get(&dev
->dev
, "sci_ick");
1922 if (IS_ERR(sci_port
->iclk
)) {
1923 sci_port
->iclk
= clk_get(&dev
->dev
, "peripheral_clk");
1924 if (IS_ERR(sci_port
->iclk
)) {
1925 dev_err(&dev
->dev
, "can't get iclk\n");
1926 return PTR_ERR(sci_port
->iclk
);
1931 * The function clock is optional, ignore it if we can't
1934 sci_port
->fclk
= clk_get(&dev
->dev
, "sci_fck");
1935 if (IS_ERR(sci_port
->fclk
))
1936 sci_port
->fclk
= NULL
;
1938 port
->dev
= &dev
->dev
;
1940 pm_runtime_irq_safe(&dev
->dev
);
1941 pm_runtime_enable(&dev
->dev
);
1944 sci_port
->break_timer
.data
= (unsigned long)sci_port
;
1945 sci_port
->break_timer
.function
= sci_break_timer
;
1946 init_timer(&sci_port
->break_timer
);
1949 * Establish some sensible defaults for the error detection.
1952 p
->error_mask
= (p
->type
== PORT_SCI
) ?
1953 SCI_DEFAULT_ERROR_MASK
: SCIF_DEFAULT_ERROR_MASK
;
1956 * Establish sensible defaults for the overrun detection, unless
1957 * the part has explicitly disabled support for it.
1959 if (p
->overrun_bit
!= SCIx_NOT_SUPPORTED
) {
1960 if (p
->type
== PORT_SCI
)
1962 else if (p
->scbrr_algo_id
== SCBRR_ALGO_4
)
1968 * Make the error mask inclusive of overrun detection, if
1971 p
->error_mask
|= (1 << p
->overrun_bit
);
1976 port
->mapbase
= p
->mapbase
;
1977 port
->type
= p
->type
;
1978 port
->flags
= p
->flags
;
1979 port
->regshift
= p
->regshift
;
1982 * The UART port needs an IRQ value, so we peg this to the RX IRQ
1983 * for the multi-IRQ ports, which is where we are primarily
1984 * concerned with the shutdown path synchronization.
1986 * For the muxed case there's nothing more to do.
1988 port
->irq
= p
->irqs
[SCIx_RXI_IRQ
];
1991 port
->serial_in
= sci_serial_in
;
1992 port
->serial_out
= sci_serial_out
;
1994 if (p
->dma_slave_tx
> 0 && p
->dma_slave_rx
> 0)
1995 dev_dbg(port
->dev
, "DMA tx %d, rx %d\n",
1996 p
->dma_slave_tx
, p
->dma_slave_rx
);
2001 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
2002 static void serial_console_putchar(struct uart_port
*port
, int ch
)
2004 sci_poll_put_char(port
, ch
);
2008 * Print a string to the serial port trying not to disturb
2009 * any possible real use of the port...
2011 static void serial_console_write(struct console
*co
, const char *s
,
2014 struct sci_port
*sci_port
= &sci_ports
[co
->index
];
2015 struct uart_port
*port
= &sci_port
->port
;
2016 unsigned short bits
;
2018 sci_port_enable(sci_port
);
2020 uart_console_write(port
, s
, count
, serial_console_putchar
);
2022 /* wait until fifo is empty and last bit has been transmitted */
2023 bits
= SCxSR_TDxE(port
) | SCxSR_TEND(port
);
2024 while ((sci_in(port
, SCxSR
) & bits
) != bits
)
2027 sci_port_disable(sci_port
);
2030 static int __devinit
serial_console_setup(struct console
*co
, char *options
)
2032 struct sci_port
*sci_port
;
2033 struct uart_port
*port
;
2041 * Refuse to handle any bogus ports.
2043 if (co
->index
< 0 || co
->index
>= SCI_NPORTS
)
2046 sci_port
= &sci_ports
[co
->index
];
2047 port
= &sci_port
->port
;
2050 * Refuse to handle uninitialized ports.
2055 ret
= sci_remap_port(port
);
2056 if (unlikely(ret
!= 0))
2059 sci_port_enable(sci_port
);
2062 uart_parse_options(options
, &baud
, &parity
, &bits
, &flow
);
2064 sci_port_disable(sci_port
);
2066 return uart_set_options(port
, co
, baud
, parity
, bits
, flow
);
2069 static struct console serial_console
= {
2071 .device
= uart_console_device
,
2072 .write
= serial_console_write
,
2073 .setup
= serial_console_setup
,
2074 .flags
= CON_PRINTBUFFER
,
2076 .data
= &sci_uart_driver
,
2079 static struct console early_serial_console
= {
2080 .name
= "early_ttySC",
2081 .write
= serial_console_write
,
2082 .flags
= CON_PRINTBUFFER
,
2086 static char early_serial_buf
[32];
2088 static int __devinit
sci_probe_earlyprintk(struct platform_device
*pdev
)
2090 struct plat_sci_port
*cfg
= pdev
->dev
.platform_data
;
2092 if (early_serial_console
.data
)
2095 early_serial_console
.index
= pdev
->id
;
2097 sci_init_single(NULL
, &sci_ports
[pdev
->id
], pdev
->id
, cfg
);
2099 serial_console_setup(&early_serial_console
, early_serial_buf
);
2101 if (!strstr(early_serial_buf
, "keep"))
2102 early_serial_console
.flags
|= CON_BOOT
;
2104 register_console(&early_serial_console
);
2108 #define uart_console(port) ((port)->cons->index == (port)->line)
2110 static int sci_runtime_suspend(struct device
*dev
)
2112 struct sci_port
*sci_port
= dev_get_drvdata(dev
);
2113 struct uart_port
*port
= &sci_port
->port
;
2115 if (uart_console(port
)) {
2116 sci_port
->saved_smr
= sci_in(port
, SCSMR
);
2117 sci_port
->saved_brr
= sci_in(port
, SCBRR
);
2118 sci_port
->saved_fcr
= sci_in(port
, SCFCR
);
2123 static int sci_runtime_resume(struct device
*dev
)
2125 struct sci_port
*sci_port
= dev_get_drvdata(dev
);
2126 struct uart_port
*port
= &sci_port
->port
;
2128 if (uart_console(port
)) {
2130 sci_out(port
, SCSMR
, sci_port
->saved_smr
);
2131 sci_out(port
, SCBRR
, sci_port
->saved_brr
);
2132 sci_out(port
, SCFCR
, sci_port
->saved_fcr
);
2133 sci_out(port
, SCSCR
, sci_port
->cfg
->scscr
);
2138 #define SCI_CONSOLE (&serial_console)
2141 static inline int __devinit
sci_probe_earlyprintk(struct platform_device
*pdev
)
2146 #define SCI_CONSOLE NULL
2147 #define sci_runtime_suspend NULL
2148 #define sci_runtime_resume NULL
2150 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
2152 static char banner
[] __initdata
=
2153 KERN_INFO
"SuperH SCI(F) driver initialized\n";
2155 static struct uart_driver sci_uart_driver
= {
2156 .owner
= THIS_MODULE
,
2157 .driver_name
= "sci",
2158 .dev_name
= "ttySC",
2160 .minor
= SCI_MINOR_START
,
2162 .cons
= SCI_CONSOLE
,
2165 static int sci_remove(struct platform_device
*dev
)
2167 struct sci_port
*port
= platform_get_drvdata(dev
);
2169 cpufreq_unregister_notifier(&port
->freq_transition
,
2170 CPUFREQ_TRANSITION_NOTIFIER
);
2172 uart_remove_one_port(&sci_uart_driver
, &port
->port
);
2174 clk_put(port
->iclk
);
2175 clk_put(port
->fclk
);
2177 pm_runtime_disable(&dev
->dev
);
2181 static int __devinit
sci_probe_single(struct platform_device
*dev
,
2183 struct plat_sci_port
*p
,
2184 struct sci_port
*sciport
)
2189 if (unlikely(index
>= SCI_NPORTS
)) {
2190 dev_notice(&dev
->dev
, "Attempting to register port "
2191 "%d when only %d are available.\n",
2192 index
+1, SCI_NPORTS
);
2193 dev_notice(&dev
->dev
, "Consider bumping "
2194 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
2198 ret
= sci_init_single(dev
, sciport
, index
, p
);
2202 return uart_add_one_port(&sci_uart_driver
, &sciport
->port
);
2205 static int __devinit
sci_probe(struct platform_device
*dev
)
2207 struct plat_sci_port
*p
= dev
->dev
.platform_data
;
2208 struct sci_port
*sp
= &sci_ports
[dev
->id
];
2212 * If we've come here via earlyprintk initialization, head off to
2213 * the special early probe. We don't have sufficient device state
2214 * to make it beyond this yet.
2216 if (is_early_platform_device(dev
))
2217 return sci_probe_earlyprintk(dev
);
2219 platform_set_drvdata(dev
, sp
);
2221 ret
= sci_probe_single(dev
, dev
->id
, p
, sp
);
2225 sp
->freq_transition
.notifier_call
= sci_notifier
;
2227 ret
= cpufreq_register_notifier(&sp
->freq_transition
,
2228 CPUFREQ_TRANSITION_NOTIFIER
);
2229 if (unlikely(ret
< 0))
2232 #ifdef CONFIG_SH_STANDARD_BIOS
2233 sh_bios_gdb_detach();
2243 static int sci_suspend(struct device
*dev
)
2245 struct sci_port
*sport
= dev_get_drvdata(dev
);
2248 uart_suspend_port(&sci_uart_driver
, &sport
->port
);
2253 static int sci_resume(struct device
*dev
)
2255 struct sci_port
*sport
= dev_get_drvdata(dev
);
2258 uart_resume_port(&sci_uart_driver
, &sport
->port
);
2263 static const struct dev_pm_ops sci_dev_pm_ops
= {
2264 .runtime_suspend
= sci_runtime_suspend
,
2265 .runtime_resume
= sci_runtime_resume
,
2266 .suspend
= sci_suspend
,
2267 .resume
= sci_resume
,
2270 static struct platform_driver sci_driver
= {
2272 .remove
= sci_remove
,
2275 .owner
= THIS_MODULE
,
2276 .pm
= &sci_dev_pm_ops
,
2280 static int __init
sci_init(void)
2286 ret
= uart_register_driver(&sci_uart_driver
);
2287 if (likely(ret
== 0)) {
2288 ret
= platform_driver_register(&sci_driver
);
2290 uart_unregister_driver(&sci_uart_driver
);
2296 static void __exit
sci_exit(void)
2298 platform_driver_unregister(&sci_driver
);
2299 uart_unregister_driver(&sci_uart_driver
);
2302 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
2303 early_platform_init_buffer("earlyprintk", &sci_driver
,
2304 early_serial_buf
, ARRAY_SIZE(early_serial_buf
));
2306 module_init(sci_init
);
2307 module_exit(sci_exit
);
2309 MODULE_LICENSE("GPL");
2310 MODULE_ALIAS("platform:sh-sci");
2311 MODULE_AUTHOR("Paul Mundt");
2312 MODULE_DESCRIPTION("SuperH SCI(F) serial driver");