/*
 * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
 *
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *  Copyright (C) 2015 Glider bvba
 *  Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
 *
 * based off of the old drivers/char/sh-sci.c by:
 *
 *   Copyright (C) 1999, 2000  Niibe Yutaka
 *   Copyright (C) 2000  Sugioka Toshinobu
 *   Modified to support multiple serial ports. Stuart Menefy (May 2000).
 *   Modified to support SecureEdge. David McCullough (2002)
 *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
 *   Removed SH7300 support (Jul 2007).
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysrq.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#include <asm/sh_bios.h>
/* Offsets into the sci_port->irqs array */
enum {
	SCIx_ERI_IRQ,
	SCIx_RXI_IRQ,
	SCIx_TXI_IRQ,
	SCIx_BRI_IRQ,
	SCIx_NR_IRQS,

	SCIx_MUX_IRQ = SCIx_NR_IRQS,	/* special case */
};

#define SCIx_IRQ_IS_MUXED(port)			\
	((port)->irqs[SCIx_ERI_IRQ] ==		\
	 (port)->irqs[SCIx_RXI_IRQ]) ||		\
	((port)->irqs[SCIx_ERI_IRQ] &&		\
	 ((port)->irqs[SCIx_RXI_IRQ] < 0))
enum SCI_CLKS {
	SCI_FCK,		/* Functional Clock */
	SCI_SCK,		/* Optional External Clock */
	SCI_BRG_INT,		/* Optional BRG Internal Clock Source */
	SCI_SCIF_CLK,		/* Optional BRG External Clock Source */
	SCI_NUM_CLKS
};
/* Bit x set means sampling rate x + 1 is supported */
#define SCI_SR(x)		BIT((x) - 1)
#define SCI_SR_RANGE(x, y)	GENMASK((y) - 1, (x) - 1)

#define SCI_SR_SCIFAB		SCI_SR(5) | SCI_SR(7) | SCI_SR(11) | \
				SCI_SR(13) | SCI_SR(16) | SCI_SR(17) | \
				SCI_SR(19) | SCI_SR(27)

#define min_sr(_port)		ffs((_port)->sampling_rate_mask)
#define max_sr(_port)		fls((_port)->sampling_rate_mask)

/* Iterate over all supported sampling rates, from high to low */
#define for_each_sr(_sr, _port)						\
	for ((_sr) = max_sr(_port); (_sr) >= min_sr(_port); (_sr)--)	\
		if ((_port)->sampling_rate_mask & SCI_SR((_sr)))
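
/*
 * Illustrative note (not from the datasheet): SCI_SR(x) marks sampling rate
 * x as supported by setting bit x - 1, so SCI_SR(16) == BIT(15) and
 * SCI_SR_RANGE(8, 32) covers every rate from 8 to 32 samples per bit.
 * for_each_sr() then walks the mask from fls() down to ffs(), i.e. highest
 * rate first, which is why the baud calculators below end up preferring the
 * larger (more robust) oversampling ratios when several give the same error.
 */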
struct sci_port {
	struct uart_port	port;

	/* Platform configuration */
	struct plat_sci_port	*cfg;
	unsigned int		overrun_reg;
	unsigned int		overrun_mask;
	unsigned int		error_mask;
	unsigned int		error_clear;
	unsigned int		sampling_rate_mask;
	resource_size_t		reg_size;

	/* Break timer */
	struct timer_list	break_timer;
	int			break_flag;

	/* Clocks */
	struct clk		*clks[SCI_NUM_CLKS];
	unsigned long		clk_rates[SCI_NUM_CLKS];

	int			irqs[SCIx_NR_IRQS];
	char			*irqstr[SCIx_NR_IRQS];

	struct dma_chan			*chan_tx;
	struct dma_chan			*chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	dma_cookie_t			cookie_tx;
	dma_cookie_t			cookie_rx[2];
	dma_cookie_t			active_rx;
	dma_addr_t			tx_dma_addr;
	unsigned int			tx_dma_len;
	struct scatterlist		sg_rx[2];
	void				*rx_buf[2];
	size_t				buf_len_rx;
	struct work_struct		work_tx;
	struct timer_list		rx_timer;
	unsigned int			rx_timeout;
#endif
};
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS

static struct sci_port sci_ports[SCI_NPORTS];
static struct uart_driver sci_uart_driver;

static inline struct sci_port *
to_sci_port(struct uart_port *uart)
{
	return container_of(uart, struct sci_port, port);
}
struct plat_sci_reg {
	u8 offset, size;
};

/* Helper for invalidating specific entries of an inherited map. */
#define sci_reg_invalid	{ .offset = 0, .size = 0 }
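
/*
 * Note: an entry initialised with sci_reg_invalid has .size == 0, which is
 * what the accessors below key off of (e.g. "if (reg->size)") to decide
 * whether a given register exists at all on a particular port type.
 */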
static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
	[SCIx_PROBE_REGTYPE] = {
		[0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
	},

	/*
	 * Common SCI definitions, dependent on the port's regshift
	 * value.
	 */
	[SCIx_SCI_REGTYPE] = {
		[SCSMR]		= { 0x00,  8 },
		[SCBRR]		= { 0x01,  8 },
		[SCSCR]		= { 0x02,  8 },
		[SCxTDR]	= { 0x03,  8 },
		[SCxSR]		= { 0x04,  8 },
		[SCxRDR]	= { 0x05,  8 },
		[SCFCR]		= sci_reg_invalid,
		[SCFDR]		= sci_reg_invalid,
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},

	/*
	 * Common definitions for legacy IrDA ports, dependent on
	 * regshift value.
	 */
	[SCIx_IRDA_REGTYPE] = {
		[SCSMR]		= { 0x00,  8 },
		[SCBRR]		= { 0x01,  8 },
		[SCSCR]		= { 0x02,  8 },
		[SCxTDR]	= { 0x03,  8 },
		[SCxSR]		= { 0x04,  8 },
		[SCxRDR]	= { 0x05,  8 },
		[SCFCR]		= { 0x06,  8 },
		[SCFDR]		= { 0x07, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},

	/*
	 * Common SCIFA definitions.
	 */
	[SCIx_SCIFA_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x20,  8 },
		[SCxSR]		= { 0x14, 16 },
		[SCxRDR]	= { 0x24,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= { 0x30, 16 },
		[SCPDR]		= { 0x34, 16 },
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},

	/*
	 * Common SCIFB definitions.
	 */
	[SCIx_SCIFB_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x40,  8 },
		[SCxSR]		= { 0x14, 16 },
		[SCxRDR]	= { 0x60,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= sci_reg_invalid,
		[SCTFDR]	= { 0x38, 16 },
		[SCRFDR]	= { 0x3c, 16 },
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= { 0x30, 16 },
		[SCPDR]		= { 0x34, 16 },
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},

	/*
	 * Common SH-2(A) SCIF definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= { 0x20, 16 },
		[SCLSR]		= { 0x24, 16 },
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},

	/*
	 * Common SH-3 SCIF definitions.
	 */
	[SCIx_SH3_SCIF_REGTYPE] = {
		[SCSMR]		= { 0x00,  8 },
		[SCBRR]		= { 0x02,  8 },
		[SCSCR]		= { 0x04,  8 },
		[SCxTDR]	= { 0x06,  8 },
		[SCxSR]		= { 0x08, 16 },
		[SCxRDR]	= { 0x0a,  8 },
		[SCFCR]		= { 0x0c,  8 },
		[SCFDR]		= { 0x0e, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions.
	 */
	[SCIx_SH4_SCIF_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= { 0x20, 16 },
		[SCLSR]		= { 0x24, 16 },
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},

	/*
	 * Common SCIF definitions for ports with a Baud Rate Generator for
	 * External Clock (BRG).
	 */
	[SCIx_SH4_SCIF_BRG_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= { 0x20, 16 },
		[SCLSR]		= { 0x24, 16 },
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= { 0x30, 16 },
		[SCCKS]		= { 0x34, 16 },
	},

	/*
	 * Common HSCIF definitions.
	 */
	[SCIx_HSCIF_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= { 0x20, 16 },
		[SCLSR]		= { 0x24, 16 },
		[HSSRR]		= { 0x40, 16 },
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= { 0x30, 16 },
		[SCCKS]		= { 0x34, 16 },
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
	 * register.
	 */
	[SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= { 0x24, 16 },
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= { 0x1c, 16 },	/* aliased to SCFDR */
		[SCRFDR]	= { 0x20, 16 },
		[SCSPTR]	= { 0x24, 16 },
		[SCLSR]		= { 0x28, 16 },
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},

	/*
	 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
	 * registers.
	 */
	[SCIx_SH7705_SCIF_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x20,  8 },
		[SCxSR]		= { 0x14, 16 },
		[SCxRDR]	= { 0x24,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
		[HSSRR]		= sci_reg_invalid,
		[SCPCR]		= sci_reg_invalid,
		[SCPDR]		= sci_reg_invalid,
		[SCDL]		= sci_reg_invalid,
		[SCCKS]		= sci_reg_invalid,
	},
};
#define sci_getreg(up, offset)		(sci_regmap[to_sci_port(up)->cfg->regtype] + offset)
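
/*
 * Typical usage sketch (illustrative): callers index the map by register
 * enum and test ->size before touching the hardware, e.g.
 *
 *	if (sci_getreg(port, SCLSR)->size)
 *		status = serial_port_in(port, SCLSR);
 *
 * so code paths degrade gracefully on port types lacking a register.
 */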
/*
 * The "offset" here is rather misleading, in that it refers to an enum
 * value relative to the port mapping rather than the fixed offset
 * itself, which needs to be manually retrieved from the platform's
 * register map for the given port.
 */
static unsigned int sci_serial_in(struct uart_port *p, int offset)
{
	const struct plat_sci_reg *reg = sci_getreg(p, offset);

	if (reg->size == 8)
		return ioread8(p->membase + (reg->offset << p->regshift));
	else if (reg->size == 16)
		return ioread16(p->membase + (reg->offset << p->regshift));
	else
		WARN(1, "Invalid register access\n");

	return 0;
}
static void sci_serial_out(struct uart_port *p, int offset, int value)
{
	const struct plat_sci_reg *reg = sci_getreg(p, offset);

	if (reg->size == 8)
		iowrite8(value, p->membase + (reg->offset << p->regshift));
	else if (reg->size == 16)
		iowrite16(value, p->membase + (reg->offset << p->regshift));
	else
		WARN(1, "Invalid register access\n");
}
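
/*
 * Worked example (illustrative, assuming a plain SCI port using regshift = 1):
 * the SCI regmap places SCxTDR at enum offset 0x03, so the byte written above
 * actually lands at membase + (0x03 << 1) = membase + 0x06.  Ports that need
 * no scaling simply leave regshift at 0.
 */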
static int sci_probe_regmap(struct plat_sci_port *cfg)
{
	switch (cfg->type) {
	case PORT_SCI:
		cfg->regtype = SCIx_SCI_REGTYPE;
		break;
	case PORT_IRDA:
		cfg->regtype = SCIx_IRDA_REGTYPE;
		break;
	case PORT_SCIFA:
		cfg->regtype = SCIx_SCIFA_REGTYPE;
		break;
	case PORT_SCIFB:
		cfg->regtype = SCIx_SCIFB_REGTYPE;
		break;
	case PORT_SCIF:
		/*
		 * The SH-4 is a bit of a misnomer here, although that's
		 * where this particular port layout originated. This
		 * configuration (or some slight variation thereof)
		 * remains the dominant model for all SCIFs.
		 */
		cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
		break;
	case PORT_HSCIF:
		cfg->regtype = SCIx_HSCIF_REGTYPE;
		break;
	default:
		pr_err("Can't probe register map for given port\n");
		return -EINVAL;
	}

	return 0;
}
static void sci_port_enable(struct sci_port *sci_port)
{
	unsigned int i;

	if (!sci_port->port.dev)
		return;

	pm_runtime_get_sync(sci_port->port.dev);

	for (i = 0; i < SCI_NUM_CLKS; i++) {
		clk_prepare_enable(sci_port->clks[i]);
		sci_port->clk_rates[i] = clk_get_rate(sci_port->clks[i]);
	}
	sci_port->port.uartclk = sci_port->clk_rates[SCI_FCK];
}
static void sci_port_disable(struct sci_port *sci_port)
{
	unsigned int i;

	if (!sci_port->port.dev)
		return;

	/* Cancel the break timer to ensure that the timer handler will not try
	 * to access the hardware with clocks and power disabled. Reset the
	 * break flag to make the break debouncing state machine ready for the
	 * next break.
	 */
	del_timer_sync(&sci_port->break_timer);
	sci_port->break_flag = 0;

	for (i = SCI_NUM_CLKS; i-- > 0; )
		clk_disable_unprepare(sci_port->clks[i]);

	pm_runtime_put_sync(sci_port->port.dev);
}
static inline unsigned long port_rx_irq_mask(struct uart_port *port)
{
	/*
	 * Not all ports (such as SCIFA) will support REIE. Rather than
	 * special-casing the port type, we check the port initialization
	 * IRQ enable mask to see whether the IRQ is desired at all. If
	 * it's unset, it's logically inferred that there's no point in
	 * testing for it.
	 */
	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
static void sci_start_tx(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned short ctrl;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 new, scr = serial_port_in(port, SCSCR);
		if (s->chan_tx)
			new = scr | SCSCR_TDRQE;
		else
			new = scr & ~SCSCR_TDRQE;
		if (new != scr)
			serial_port_out(port, SCSCR, new);
	}

	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
	    dma_submit_error(s->cookie_tx)) {
		s->cookie_tx = 0;
		schedule_work(&s->work_tx);
	}
#endif

	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
		ctrl = serial_port_in(port, SCSCR);
		serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
	}
}
static void sci_stop_tx(struct uart_port *port)
{
	unsigned short ctrl;

	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
	ctrl = serial_port_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		ctrl &= ~SCSCR_TDRQE;

	ctrl &= ~SCSCR_TIE;

	serial_port_out(port, SCSCR, ctrl);
}
static void sci_start_rx(struct uart_port *port)
{
	unsigned short ctrl;

	ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		ctrl &= ~SCSCR_RDRQE;

	serial_port_out(port, SCSCR, ctrl);
}
static void sci_stop_rx(struct uart_port *port)
{
	unsigned short ctrl;

	ctrl = serial_port_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		ctrl &= ~SCSCR_RDRQE;

	ctrl &= ~port_rx_irq_mask(port);

	serial_port_out(port, SCSCR, ctrl);
}
static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
{
	if (port->type == PORT_SCI) {
		/* Just store the mask */
		serial_port_out(port, SCxSR, mask);
	} else if (to_sci_port(port)->overrun_mask == SCIFA_ORER) {
		/* SCIFA/SCIFB and SCIF on SH7705/SH7720/SH7721 */
		/* Only clear the status bits we want to clear */
		serial_port_out(port, SCxSR,
				serial_port_in(port, SCxSR) & mask);
	} else {
		/* Store the mask, clear parity/framing errors */
		serial_port_out(port, SCxSR, mask & ~(SCIF_FERC | SCIF_PERC));
	}
}
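
/*
 * Note: the "clear" masks passed to this helper (SCxSR_RDxF_CLEAR() and
 * friends) are defined per port type in sh-sci.h; the helper itself only
 * decides how that mask is applied to SCxSR for the three register flavours
 * handled above.
 */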
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
    defined(CONFIG_SERIAL_SH_SCI_EARLYCON)

#ifdef CONFIG_CONSOLE_POLL
static int sci_poll_get_char(struct uart_port *port)
{
	unsigned short status;
	int c;

	do {
		status = serial_port_in(port, SCxSR);
		if (status & SCxSR_ERRORS(port)) {
			sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
			continue;
		}
		break;
	} while (1);

	if (!(status & SCxSR_RDxF(port)))
		return NO_POLL_CHAR;

	c = serial_port_in(port, SCxRDR);

	/* Dummy read */
	serial_port_in(port, SCxSR);
	sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));

	return c;
}
#endif

static void sci_poll_put_char(struct uart_port *port, unsigned char c)
{
	unsigned short status;

	do {
		status = serial_port_in(port, SCxSR);
	} while (!(status & SCxSR_TDxE(port)));

	serial_port_out(port, SCxTDR, c);
	sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE ||
	  CONFIG_SERIAL_SH_SCI_EARLYCON */
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	struct sci_port *s = to_sci_port(port);
	const struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;

	/*
	 * Use port-specific handler if provided.
	 */
	if (s->cfg->ops && s->cfg->ops->init_pins) {
		s->cfg->ops->init_pins(port, cflag);
		return;
	}

	/*
	 * For the generic path SCSPTR is necessary. Bail out if that's
	 * unavailable, too.
	 */
	if (!reg->size)
		return;

	if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) &&
	    ((!(cflag & CRTSCTS)))) {
		unsigned short status;

		status = serial_port_in(port, SCSPTR);
		status &= ~SCSPTR_CTSIO;
		status |= SCSPTR_RTSIO;
		serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */
	}
}
static int sci_txfill(struct uart_port *port)
{
	const struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCTFDR);
	if (reg->size)
		return serial_port_in(port, SCTFDR) & ((port->fifosize << 1) - 1);

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return serial_port_in(port, SCFDR) >> 8;

	return !(serial_port_in(port, SCxSR) & SCI_TDRE);
}

static int sci_txroom(struct uart_port *port)
{
	return port->fifosize - sci_txfill(port);
}
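
/*
 * Note on the masking above: SCTFDR/SCRFDR hold a fill count of at most
 * "fifosize" entries, so "(port->fifosize << 1) - 1" is simply a power-of-two
 * mask wide enough to cover that count (e.g. a 16-byte FIFO yields a 0x1f
 * mask); any upper status bits of the register are don't-care here.
 */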
static int sci_rxfill(struct uart_port *port)
{
	const struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCRFDR);
	if (reg->size)
		return serial_port_in(port, SCRFDR) & ((port->fifosize << 1) - 1);

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return serial_port_in(port, SCFDR) & ((port->fifosize << 1) - 1);

	return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
/*
 * SCI helper for checking the state of the muxed port/RXD pins.
 */
static inline int sci_rxd_in(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	if (s->cfg->port_reg <= 0)
		return 1;

	/* Cast for ARM damage */
	return !!__raw_readb((void __iomem *)(uintptr_t)s->cfg->port_reg);
}
/* ********************************************************************** *
 *                   the interrupt related routines                        *
 * ********************************************************************** */
static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = serial_port_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		ctrl = serial_port_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		serial_port_out(port, SCSCR, ctrl);
		return;
	}

	count = sci_txroom(port);

	do {
		unsigned char c;

		if (port->x_char) {
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		serial_port_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit)) {
		sci_stop_tx(port);
	} else {
		ctrl = serial_port_in(port, SCSCR);

		if (port->type != PORT_SCI) {
			serial_port_in(port, SCxSR); /* Dummy read */
			sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
		}

		ctrl |= SCSCR_TIE;
		serial_port_out(port, SCSCR, ctrl);
	}
}
/* On SH3, SCIF may read end-of-break as a space->mark char */
#define STEPFN(c)  ({int __c = (c); (((__c-1)|(__c)) == -1); })

static void sci_receive_chars(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);
	struct tty_port *tport = &port->state->port;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = serial_port_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tport, sci_rxfill(port));

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			char c = serial_port_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c) ||
			    sci_port->break_flag)
				count = 0;
			else
				tty_insert_flip_char(tport, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c = serial_port_in(port, SCxRDR);

				status = serial_port_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
				/* Skip "chars" during break */
				if (sci_port->break_flag) {
					if ((c == 0) &&
					    (status & SCxSR_FER(port))) {
						count--; i--;
						continue;
					}

					/* Nonzero => end-of-break */
					dev_dbg(port->dev, "debounce<%02x>\n", c);
					sci_port->break_flag = 0;

					if (STEPFN(c)) {
						count--; i--;
						continue;
					}
				}
#endif /* CONFIG_CPU_SH3 */
				if (uart_handle_sysrq_char(port, c)) {
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					port->icount.frame++;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
					flag = TTY_PARITY;
					port->icount.parity++;
					dev_notice(port->dev, "parity error\n");
				} else
					flag = TTY_NORMAL;

				tty_insert_flip_char(tport, c, flag);
			}
		}

		serial_port_in(port, SCxSR); /* dummy read */
		sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tport);
	} else {
		serial_port_in(port, SCxSR); /* dummy read */
		sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
	}
}
#define SCI_BREAK_JIFFIES (HZ/20)

/*
 * The sci generates interrupts during the break,
 * 1 per millisecond or so during the break period, for 9600 baud.
 * So don't bother disabling interrupts.
 * But don't want more than 1 break event.
 * Use a kernel timer to periodically poll the rx line until
 * the break is finished.
 */
static inline void sci_schedule_break_timer(struct sci_port *port)
{
	mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
}
/* Ensure that two consecutive samples find the break over. */
static void sci_break_timer(unsigned long data)
{
	struct sci_port *port = (struct sci_port *)data;

	if (sci_rxd_in(&port->port) == 0) {
		port->break_flag = 1;
		sci_schedule_break_timer(port);
	} else if (port->break_flag == 1) {
		/* break is over. */
		port->break_flag = 2;
		sci_schedule_break_timer(port);
	} else
		port->break_flag = 0;
}
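
/*
 * Debounce state sketch: break_flag is 0 while the line is idle, 1 while RXD
 * is still being sampled low, and 2 once one sample has seen the line high
 * again; only after a second consecutive high sample does the flag drop back
 * to 0, so a single glitch cannot end the break early.
 */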
static int sci_handle_errors(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = serial_port_in(port, SCxSR);
	struct tty_port *tport = &port->state->port;
	struct sci_port *s = to_sci_port(port);

	/* Handle overruns */
	if (status & s->overrun_mask) {
		port->icount.overrun++;

		/* overrun error */
		if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
			copied++;

		dev_notice(port->dev, "overrun error\n");
	}

	if (status & SCxSR_FER(port)) {
		if (sci_rxd_in(port) == 0) {
			/* Notify of BREAK */
			struct sci_port *sci_port = to_sci_port(port);

			if (!sci_port->break_flag) {
				port->icount.brk++;

				/* Debounce break */
				sci_port->break_flag = 1;
				sci_schedule_break_timer(sci_port);

				/* Do sysrq handling. */
				if (uart_handle_break(port))
					return 0;

				dev_dbg(port->dev, "BREAK detected\n");

				if (tty_insert_flip_char(tport, 0, TTY_BREAK))
					copied++;
			}
		} else {
			/* frame error */
			port->icount.frame++;

			if (tty_insert_flip_char(tport, 0, TTY_FRAME))
				copied++;

			dev_notice(port->dev, "frame error\n");
		}
	}

	if (status & SCxSR_PER(port)) {
		/* parity error */
		port->icount.parity++;

		if (tty_insert_flip_char(tport, 0, TTY_PARITY))
			copied++;

		dev_notice(port->dev, "parity error\n");
	}

	if (copied)
		tty_flip_buffer_push(tport);

	return copied;
}
static int sci_handle_fifo_overrun(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	struct sci_port *s = to_sci_port(port);
	const struct plat_sci_reg *reg;
	int copied = 0;
	u16 status;

	reg = sci_getreg(port, s->overrun_reg);
	if (!reg->size)
		return 0;

	status = serial_port_in(port, s->overrun_reg);
	if (status & s->overrun_mask) {
		status &= ~s->overrun_mask;
		serial_port_out(port, s->overrun_reg, status);

		port->icount.overrun++;

		tty_insert_flip_char(tport, 0, TTY_OVERRUN);
		tty_flip_buffer_push(tport);

		dev_dbg(port->dev, "overrun error\n");
		copied++;
	}

	return copied;
}
static int sci_handle_breaks(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = serial_port_in(port, SCxSR);
	struct tty_port *tport = &port->state->port;
	struct sci_port *s = to_sci_port(port);

	if (uart_handle_break(port))
		return 0;

	if (!s->break_flag && status & SCxSR_BRK(port)) {
#if defined(CONFIG_CPU_SH3)
		/* Debounce break */
		s->break_flag = 1;
#endif

		port->icount.brk++;

		/* Notify of BREAK */
		if (tty_insert_flip_char(tport, 0, TTY_BREAK))
			copied++;

		dev_dbg(port->dev, "BREAK detected\n");
	}

	if (copied)
		tty_flip_buffer_push(tport);

	copied += sci_handle_fifo_overrun(port);

	return copied;
}
#ifdef CONFIG_SERIAL_SH_SCI_DMA
static void sci_dma_tx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);

	xmit->tail += s->tx_dma_len;
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += s->tx_dma_len;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		s->cookie_tx = 0;
		schedule_work(&s->work_tx);
	} else {
		s->cookie_tx = -EINVAL;
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			u16 ctrl = serial_port_in(port, SCSCR);
			serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
		}
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
1128 static int sci_dma_rx_push(struct sci_port
*s
, void *buf
, size_t count
)
1130 struct uart_port
*port
= &s
->port
;
1131 struct tty_port
*tport
= &port
->state
->port
;
1134 copied
= tty_insert_flip_string(tport
, buf
, count
);
1135 if (copied
< count
) {
1136 dev_warn(port
->dev
, "Rx overrun: dropping %zu bytes\n",
1138 port
->icount
.buf_overrun
++;
1141 port
->icount
.rx
+= copied
;
static int sci_dma_rx_find_active(struct sci_port *s)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++)
		if (s->active_rx == s->cookie_rx[i])
			return i;

	dev_err(s->port.dev, "%s: Rx cookie %d not found!\n", __func__,
		s->active_rx);
	return -1;
}

static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	s->chan_rx = NULL;
	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
	spin_unlock_irqrestore(&port->lock, flags);
	dmaengine_terminate_all(chan);
	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
			  sg_dma_address(&s->sg_rx[0]));
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_rx(port);
}

static void sci_dma_rx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	int active, count = 0;

	dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
		s->active_rx);

	spin_lock_irqsave(&port->lock, flags);

	active = sci_dma_rx_find_active(s);
	if (active >= 0)
		count = sci_dma_rx_push(s, s->rx_buf[active], s->buf_len_rx);

	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

	if (count)
		tty_flip_buffer_push(&port->state->port);

	desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[active], 1,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto fail;

	desc->callback = sci_dma_rx_complete;
	desc->callback_param = s;
	s->cookie_rx[active] = dmaengine_submit(desc);
	if (dma_submit_error(s->cookie_rx[active]))
		goto fail;

	s->active_rx = s->cookie_rx[!active];

	dma_async_issue_pending(chan);

	dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
		__func__, s->cookie_rx[active], active, s->active_rx);
	spin_unlock_irqrestore(&port->lock, flags);
	return;

fail:
	spin_unlock_irqrestore(&port->lock, flags);
	dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
	sci_rx_dma_release(s, true);
}
static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	s->chan_tx = NULL;
	s->cookie_tx = -EINVAL;
	spin_unlock_irqrestore(&port->lock, flags);
	dmaengine_terminate_all(chan);
	dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
			 DMA_TO_DEVICE);
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_tx(port);
}
static void sci_submit_rx(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_rx;
	int i;

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_slave_sg(chan,
			sg, 1, DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			goto fail;

		desc->callback = sci_dma_rx_complete;
		desc->callback_param = s;
		s->cookie_rx[i] = dmaengine_submit(desc);
		if (dma_submit_error(s->cookie_rx[i]))
			goto fail;

		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
			s->cookie_rx[i], i);
	}

	s->active_rx = s->cookie_rx[0];

	dma_async_issue_pending(chan);
	return;

fail:
	if (i)
		dmaengine_terminate_all(chan);
	for (i = 0; i < 2; i++)
		s->cookie_rx[i] = -EINVAL;
	s->active_rx = -EINVAL;
	dev_warn(s->port.dev, "Failed to re-start Rx DMA, using PIO\n");
	sci_rx_dma_release(s, true);
}
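
/*
 * Rx DMA uses two single-entry scatterlists as a ping-pong pair: both
 * descriptors are queued here, active_rx tracks the cookie currently being
 * filled, and sci_dma_rx_complete() re-queues the buffer it just drained
 * while flipping active_rx over to the other one.
 */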
static void work_fn_tx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_tx);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	dma_addr_t buf;

	/*
	 * Port xmit buffer is already mapped, and it is one page... Just adjust
	 * offsets and lengths. Since it is a circular buffer, we have to
	 * transmit till the end, and then the rest. Take the port lock to get a
	 * consistent xmit buffer state.
	 */
	spin_lock_irq(&port->lock);
	buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
	s->tx_dma_len = min_t(unsigned int,
			      CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
			      CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
	spin_unlock_irq(&port->lock);

	desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
				   DMA_TO_DEVICE);

	spin_lock_irq(&port->lock);
	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	spin_unlock_irq(&port->lock);
	s->cookie_tx = dmaengine_submit(desc);
	if (dma_submit_error(s->cookie_tx)) {
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
		__func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);

	dma_async_issue_pending(chan);
}
static void rx_timer_fn(unsigned long arg)
{
	struct sci_port *s = (struct sci_port *)arg;
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;
	struct dma_tx_state state;
	enum dma_status status;
	unsigned long flags;
	unsigned int read;
	int active, count;
	u16 scr;

	spin_lock_irqsave(&port->lock, flags);

	dev_dbg(port->dev, "DMA Rx timed out\n");

	active = sci_dma_rx_find_active(s);
	if (active < 0) {
		spin_unlock_irqrestore(&port->lock, flags);
		return;
	}

	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
	if (status == DMA_COMPLETE) {
		dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
			s->active_rx, active);
		spin_unlock_irqrestore(&port->lock, flags);

		/* Let packet complete handler take care of the packet */
		return;
	}

	dmaengine_pause(chan);

	/*
	 * sometimes DMA transfer doesn't stop even if it is stopped and
	 * data keeps on coming until transaction is complete so check
	 * for DMA_COMPLETE again
	 * Let packet complete handler take care of the packet
	 */
	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
	if (status == DMA_COMPLETE) {
		spin_unlock_irqrestore(&port->lock, flags);
		dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
		return;
	}

	/* Handle incomplete DMA receive */
	dmaengine_terminate_all(s->chan_rx);
	read = sg_dma_len(&s->sg_rx[active]) - state.residue;
	dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
		s->active_rx);

	if (read) {
		count = sci_dma_rx_push(s, s->rx_buf[active], read);
		if (count)
			tty_flip_buffer_push(&port->state->port);
	}

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		sci_submit_rx(s);

	/* Direct new serial port interrupts back to CPU */
	scr = serial_port_in(port, SCSCR);
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		scr &= ~SCSCR_RDRQE;
		enable_irq(s->irqs[SCIx_RXI_IRQ]);
	}
	serial_port_out(port, SCSCR, scr | SCSCR_RIE);

	spin_unlock_irqrestore(&port->lock, flags);
}
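
/*
 * Residue handling note: when the timer fires before a buffer completes, the
 * amount actually received is sg_dma_len() minus the residue reported by
 * dmaengine_tx_status(), e.g. a 128-byte buffer with a residue of 100 means
 * 28 bytes are pushed to the tty layer before DMA is restarted.
 */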
static struct dma_chan *sci_request_dma_chan(struct uart_port *port,
					     enum dma_transfer_direction dir,
					     unsigned int id)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
					(void *)(unsigned long)id, port->dev,
					dir == DMA_MEM_TO_DEV ? "tx" : "rx");
	if (!chan) {
		dev_warn(port->dev,
			 "dma_request_slave_channel_compat failed\n");
		return NULL;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;
	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = port->mapbase +
			(sci_getreg(port, SCxTDR)->offset << port->regshift);
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else {
		cfg.src_addr = port->mapbase +
			(sci_getreg(port, SCxRDR)->offset << port->regshift);
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(port->dev, "dmaengine_slave_config failed %d\n", ret);
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}

static void sci_request_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct dma_chan *chan;

	dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);

	if (!port->dev->of_node &&
	    (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0))
		return;

	s->cookie_tx = -EINVAL;
	chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV, s->cfg->dma_slave_tx);
	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
	if (chan) {
		s->chan_tx = chan;
		/* UART circular tx buffer is an aligned page. */
		s->tx_dma_addr = dma_map_single(chan->device->dev,
						port->state->xmit.buf,
						UART_XMIT_SIZE,
						DMA_TO_DEVICE);
		if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
			dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
			dma_release_channel(chan);
			s->chan_tx = NULL;
		} else {
			dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
				__func__, UART_XMIT_SIZE,
				port->state->xmit.buf, &s->tx_dma_addr);
		}

		INIT_WORK(&s->work_tx, work_fn_tx);
	}

	chan = sci_request_dma_chan(port, DMA_DEV_TO_MEM, s->cfg->dma_slave_rx);
	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
	if (chan) {
		unsigned int i;
		dma_addr_t dma;
		void *buf;

		s->chan_rx = chan;

		s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize);
		buf = dma_alloc_coherent(chan->device->dev, s->buf_len_rx * 2,
					 &dma, GFP_KERNEL);
		if (!buf) {
			dev_warn(port->dev,
				 "Failed to allocate Rx dma buffer, using PIO\n");
			dma_release_channel(chan);
			s->chan_rx = NULL;
			return;
		}

		for (i = 0; i < 2; i++) {
			struct scatterlist *sg = &s->sg_rx[i];

			sg_init_table(sg, 1);
			s->rx_buf[i] = buf;
			sg_dma_address(sg) = dma;
			sg_dma_len(sg) = s->buf_len_rx;

			buf += s->buf_len_rx;
			dma += s->buf_len_rx;
		}

		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);

		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
			sci_submit_rx(s);
	}
}

static void sci_free_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	if (s->chan_tx)
		sci_tx_dma_release(s, false);
	if (s->chan_rx)
		sci_rx_dma_release(s, false);
}
#else
static inline void sci_request_dma(struct uart_port *port)
{
}

static inline void sci_free_dma(struct uart_port *port)
{
}
#endif
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	if (s->chan_rx) {
		u16 scr = serial_port_in(port, SCSCR);
		u16 ssr = serial_port_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			disable_irq_nosync(irq);
			scr |= SCSCR_RDRQE;
		} else {
			scr &= ~SCSCR_RIE;
			sci_submit_rx(s);
		}
		serial_port_out(port, SCSCR, scr);
		/* Clear current interrupt */
		serial_port_out(port, SCxSR,
				ssr & ~(SCIF_DR | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
			jiffies, s->rx_timeout);
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

		return IRQ_HANDLED;
	}
#endif

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(ptr);

	return IRQ_HANDLED;
}
static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	sci_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			serial_port_in(port, SCxSR);
			sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		sci_handle_fifo_overrun(port);
		if (!s->chan_rx)
			sci_receive_chars(ptr);
	}

	sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	if (!s->chan_tx)
		sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}
static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle BREAKs */
	sci_handle_breaks(port);
	sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));

	return IRQ_HANDLED;
}
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
	unsigned short ssr_status, scr_status, err_enabled, orer_status = 0;
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);
	irqreturn_t ret = IRQ_NONE;

	ssr_status = serial_port_in(port, SCxSR);
	scr_status = serial_port_in(port, SCSCR);
	if (s->overrun_reg == SCxSR)
		orer_status = ssr_status;
	else {
		if (sci_getreg(port, s->overrun_reg)->size)
			orer_status = serial_port_in(port, s->overrun_reg);
	}

	err_enabled = scr_status & port_rx_irq_mask(port);

	/* Tx Interrupt */
	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
	    !s->chan_tx)
		ret = sci_tx_interrupt(irq, ptr);

	/*
	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
	 * DR flags
	 */
	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
	    (scr_status & SCSCR_RIE))
		ret = sci_rx_interrupt(irq, ptr);

	/* Error Interrupt */
	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
		ret = sci_er_interrupt(irq, ptr);

	/* Break Interrupt */
	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
		ret = sci_br_interrupt(irq, ptr);

	/* Overrun Interrupt */
	if (orer_status & s->overrun_mask) {
		sci_handle_fifo_overrun(port);
		ret = IRQ_HANDLED;
	}

	return ret;
}
static const struct sci_irq_desc {
	const char	*desc;
	irq_handler_t	handler;
} sci_irq_desc[] = {
	/*
	 * Split out handlers, the default case.
	 */
	[SCIx_ERI_IRQ] = {
		.desc = "rx err",
		.handler = sci_er_interrupt,
	},

	[SCIx_RXI_IRQ] = {
		.desc = "rx full",
		.handler = sci_rx_interrupt,
	},

	[SCIx_TXI_IRQ] = {
		.desc = "tx empty",
		.handler = sci_tx_interrupt,
	},

	[SCIx_BRI_IRQ] = {
		.desc = "break",
		.handler = sci_br_interrupt,
	},

	/*
	 * Special muxed handler.
	 */
	[SCIx_MUX_IRQ] = {
		.desc = "mux",
		.handler = sci_mpxed_interrupt,
	},
};
static int sci_request_irq(struct sci_port *port)
{
	struct uart_port *up = &port->port;
	int i, j, ret = 0;

	for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
		const struct sci_irq_desc *desc;
		int irq;

		if (SCIx_IRQ_IS_MUXED(port)) {
			i = SCIx_MUX_IRQ;
			irq = up->irq;
		} else {
			irq = port->irqs[i];

			/*
			 * Certain port types won't support all of the
			 * available interrupt sources.
			 */
			if (unlikely(irq < 0))
				continue;
		}

		desc = sci_irq_desc + i;
		port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
					    dev_name(up->dev), desc->desc);
		if (!port->irqstr[j])
			goto out_nomem;

		ret = request_irq(irq, desc->handler, up->irqflags,
				  port->irqstr[j], port);
		if (unlikely(ret)) {
			dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
			goto out_noirq;
		}
	}

	return 0;

out_noirq:
	while (--i >= 0)
		free_irq(port->irqs[i], port);

out_nomem:
	while (--j >= 0)
		kfree(port->irqstr[j]);

	return ret;
}
static void sci_free_irq(struct sci_port *port)
{
	int i;

	/*
	 * Intentionally in reverse order so we iterate over the muxed
	 * IRQ first.
	 */
	for (i = 0; i < SCIx_NR_IRQS; i++) {
		int irq = port->irqs[i];

		/*
		 * Certain port types won't support all of the available
		 * interrupt sources.
		 */
		if (unlikely(irq < 0))
			continue;

		free_irq(port->irqs[i], port);
		kfree(port->irqstr[i]);

		if (SCIx_IRQ_IS_MUXED(port)) {
			/* If there's only one IRQ, we're done. */
			return;
		}
	}
}
static unsigned int sci_tx_empty(struct uart_port *port)
{
	unsigned short status = serial_port_in(port, SCxSR);
	unsigned short in_tx_fifo = sci_txfill(port);

	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
/*
 * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
 * CTS/RTS is supported in hardware by at least one port and controlled
 * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
 * handled via the ->init_pins() op, which is a bit of a one-way street,
 * lacking any ability to defer pin control -- this will later be
 * converted over to the GPIO framework).
 *
 * Other modes (such as loopback) are supported generically on certain
 * port types, but not others. For these it's sufficient to test for the
 * existence of the support register and simply ignore the port type.
 */
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	if (mctrl & TIOCM_LOOP) {
		const struct plat_sci_reg *reg;

		/*
		 * Standard loopback mode for SCFCR ports.
		 */
		reg = sci_getreg(port, SCFCR);
		if (reg->size)
			serial_port_out(port, SCFCR,
					serial_port_in(port, SCFCR) |
					SCFCR_LOOP);
	}
}
static unsigned int sci_get_mctrl(struct uart_port *port)
{
	/*
	 * CTS/RTS is handled in hardware when supported, while nothing
	 * else is wired up. Keep it simple and simply assert DSR/CAR.
	 */
	return TIOCM_DSR | TIOCM_CAR;
}
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	struct sci_port *s = to_sci_port(port);
	const struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
	unsigned short scscr, scsptr;

	/* check whether the port has SCSPTR */
	if (!reg->size) {
		/*
		 * Not supported by hardware. Most parts couple break and rx
		 * interrupts together, with break detection always enabled.
		 */
		return;
	}

	scsptr = serial_port_in(port, SCSPTR);
	scscr = serial_port_in(port, SCSCR);

	if (break_state == -1) {
		scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
		scscr &= ~SCSCR_TE;
	} else {
		scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO;
		scscr |= SCSCR_TE;
	}

	serial_port_out(port, SCSPTR, scsptr);
	serial_port_out(port, SCSCR, scscr);
}
static int sci_startup(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned long flags;
	int ret;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	ret = sci_request_irq(s);
	if (unlikely(ret < 0))
		return ret;

	sci_request_dma(port);

	spin_lock_irqsave(&port->lock, flags);
	sci_start_tx(port);
	sci_start_rx(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return 0;
}
static void sci_shutdown(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);
	sci_stop_rx(port);
	sci_stop_tx(port);
	spin_unlock_irqrestore(&port->lock, flags);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (s->chan_rx) {
		dev_dbg(port->dev, "%s(%d) deleting rx_timer\n", __func__,
			port->line);
		del_timer_sync(&s->rx_timer);
	}
#endif

	sci_free_dma(port);
	sci_free_irq(s);
}
static int sci_sck_calc(struct sci_port *s, unsigned int bps,
			unsigned int *srr)
{
	unsigned long freq = s->clk_rates[SCI_SCK];
	int err, min_err = INT_MAX;
	unsigned int sr;

	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	for_each_sr(sr, s) {
		err = DIV_ROUND_CLOSEST(freq, sr) - bps;
		if (abs(err) >= abs(min_err))
			continue;

		min_err = err;
		*srr = sr - 1;

		if (!err)
			break;
	}

	dev_dbg(s->port.dev, "SCK: %u%+d bps using SR %u\n", bps, min_err,
		*srr + 1);
	return min_err;
}
static int sci_brg_calc(struct sci_port *s, unsigned int bps,
			unsigned long freq, unsigned int *dlr,
			unsigned int *srr)
{
	int err, min_err = INT_MAX;
	unsigned int sr, dl;

	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	for_each_sr(sr, s) {
		dl = DIV_ROUND_CLOSEST(freq, sr * bps);
		dl = clamp(dl, 1U, 65535U);

		err = DIV_ROUND_CLOSEST(freq, sr * dl) - bps;
		if (abs(err) >= abs(min_err))
			continue;

		min_err = err;
		*dlr = dl;
		*srr = sr - 1;

		if (!err)
			break;
	}

	dev_dbg(s->port.dev, "BRG: %u%+d bps using DL %u SR %u\n", bps,
		min_err, *dlr, *srr + 1);
	return min_err;
}
/* calculate sample rate, BRR, and clock select */
static int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
			  unsigned int *brr, unsigned int *srr,
			  unsigned int *cks)
{
	unsigned long freq = s->clk_rates[SCI_FCK];
	unsigned int sr, br, prediv, scrate, c;
	int err, min_err = INT_MAX;

	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	/*
	 * Find the combination of sample rate and clock select with the
	 * smallest deviation from the desired baud rate.
	 * Prefer high sample rates to maximise the receive margin.
	 *
	 * M: Receive margin (%)
	 * N: Ratio of bit rate to clock (N = sampling rate)
	 * D: Clock duty (D = 0 to 1.0)
	 * L: Frame length (L = 9 to 12)
	 * F: Absolute value of clock frequency deviation
	 *
	 *  M = |(0.5 - 1 / 2 * N) - ((L - 0.5) * F) -
	 *      (|D - 0.5| / N * (1 + F))|
	 *  NOTE: Usually, treat D for 0.5, F is 0 by this calculation.
	 */
	for_each_sr(sr, s) {
		for (c = 0; c <= 3; c++) {
			/* integerized formulas from HSCIF documentation */
			prediv = sr * (1 << (2 * c + 1));

			/*
			 * We need to calculate:
			 *
			 *     br = freq / (prediv * bps) clamped to [1..256]
			 *     err = freq / (br * prediv) - bps
			 *
			 * Watch out for overflow when calculating the desired
			 * sampling clock rate!
			 */
			if (bps > UINT_MAX / prediv)
				break;

			scrate = prediv * bps;
			br = DIV_ROUND_CLOSEST(freq, scrate);
			br = clamp(br, 1U, 256U);

			err = DIV_ROUND_CLOSEST(freq, br * prediv) - bps;
			if (abs(err) >= abs(min_err))
				continue;

			min_err = err;
			*brr = br - 1;
			*srr = sr - 1;
			*cks = c;

			if (!err)
				goto found;
		}
	}

found:
	dev_dbg(s->port.dev, "BRR: %u%+d bps using N %u SR %u cks %u\n", bps,
		min_err, *brr, *srr + 1, *cks);
	return min_err;
}
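
/*
 * Worked example (illustrative numbers): a classic SCIF advertises a fixed
 * sampling rate of 32, and "freq" is doubled above for non-HSCIF parts, so
 * with a 14.7456 MHz functional clock and cks = 0, prediv = 32 * 2 = 64 and a
 * requested 115200 bps yields br = 29491200 / (64 * 115200) = 4, i.e. BRR = 3
 * with zero error, matching the familiar N = Pphi / (32 * B) - 1 formula from
 * the SH datasheets.
 */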
static void sci_reset(struct uart_port *port)
{
	const struct plat_sci_reg *reg;
	unsigned int status;

	do {
		status = serial_port_in(port, SCxSR);
	} while (!(status & SCxSR_TEND(port)));

	serial_port_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */

	reg = sci_getreg(port, SCFCR);
	if (reg->size)
		serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
}
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
			    struct ktermios *old)
{
	unsigned int baud, smr_val = SCSMR_ASYNC, scr_val = 0, i;
	unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0;
	unsigned int brr1 = 255, cks1 = 0, srr1 = 15, dl1 = 0;
	struct sci_port *s = to_sci_port(port);
	const struct plat_sci_reg *reg;
	int min_err = INT_MAX, err;
	unsigned long max_freq = 0;
	int best_clk = -1;

	if ((termios->c_cflag & CSIZE) == CS7)
		smr_val |= SCSMR_CHR;
	if (termios->c_cflag & PARENB)
		smr_val |= SCSMR_PE;
	if (termios->c_cflag & PARODD)
		smr_val |= SCSMR_PE | SCSMR_ODD;
	if (termios->c_cflag & CSTOPB)
		smr_val |= SCSMR_STOP;

	/*
	 * earlyprintk comes here early on with port->uartclk set to zero.
	 * the clock framework is not up and running at this point so here
	 * we assume that 115200 is the maximum baud rate. please note that
	 * the baud rate is not programmed during earlyprintk - it is assumed
	 * that the previous boot loader has enabled required clocks and
	 * setup the baud rate generator hardware for us already.
	 */
	if (!port->uartclk) {
		baud = uart_get_baud_rate(port, termios, old, 0, 115200);
		goto done;
	}

	for (i = 0; i < SCI_NUM_CLKS; i++)
		max_freq = max(max_freq, s->clk_rates[i]);

	baud = uart_get_baud_rate(port, termios, old, 0, max_freq / min_sr(s));
	if (!baud)
		goto done;

	/*
	 * There can be multiple sources for the sampling clock.  Find the one
	 * that gives us the smallest deviation from the desired baud rate.
	 */

	/* Optional Undivided External Clock */
	if (s->clk_rates[SCI_SCK] && port->type != PORT_SCIFA &&
	    port->type != PORT_SCIFB) {
		err = sci_sck_calc(s, baud, &srr1);
		if (abs(err) < abs(min_err)) {
			best_clk = SCI_SCK;
			scr_val = SCSCR_CKE1;
			sccks = SCCKS_CKS;
			min_err = err;
			srr = srr1;
			if (!err)
				goto done;
		}
	}

	/* Optional BRG Frequency Divided External Clock */
	if (s->clk_rates[SCI_SCIF_CLK] && sci_getreg(port, SCDL)->size) {
		err = sci_brg_calc(s, baud, s->clk_rates[SCI_SCIF_CLK], &dl1,
				   &srr1);
		if (abs(err) < abs(min_err)) {
			best_clk = SCI_SCIF_CLK;
			scr_val = SCSCR_CKE1;
			sccks = 0;
			min_err = err;
			dl = dl1;
			srr = srr1;
			if (!err)
				goto done;
		}
	}

	/* Optional BRG Frequency Divided Internal Clock */
	if (s->clk_rates[SCI_BRG_INT] && sci_getreg(port, SCDL)->size) {
		err = sci_brg_calc(s, baud, s->clk_rates[SCI_BRG_INT], &dl1,
				   &srr1);
		if (abs(err) < abs(min_err)) {
			best_clk = SCI_BRG_INT;
			scr_val = SCSCR_CKE1;
			sccks = SCCKS_XIN;
			min_err = err;
			dl = dl1;
			srr = srr1;
			if (!min_err)
				goto done;
		}
	}

	/* Divided Functional Clock using standard Bit Rate Register */
	err = sci_scbrr_calc(s, baud, &brr1, &srr1, &cks1);
	if (abs(err) < abs(min_err)) {
		best_clk = SCI_FCK;
		scr_val = 0;
		min_err = err;
		brr = brr1;
		srr = srr1;
		cks = cks1;
	}

done:
	dev_dbg(port->dev, "Using clk %pC for %u%+d bps\n",
		s->clks[best_clk], baud, min_err);

	sci_port_enable(s);

	/*
	 * Program the optional External Baud Rate Generator (BRG) first.
	 * It controls the mux to select (H)SCK or frequency divided clock.
	 */
	if (best_clk >= 0 && sci_getreg(port, SCCKS)->size) {
		serial_port_out(port, SCDL, dl);
		serial_port_out(port, SCCKS, sccks);
	}

	sci_reset(port);

	uart_update_timeout(port, termios->c_cflag, baud);

	if (best_clk >= 0) {
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
			switch (srr + 1) {
			case 5:  smr_val |= SCSMR_SRC_5;  break;
			case 7:  smr_val |= SCSMR_SRC_7;  break;
			case 11: smr_val |= SCSMR_SRC_11; break;
			case 13: smr_val |= SCSMR_SRC_13; break;
			case 16: smr_val |= SCSMR_SRC_16; break;
			case 17: smr_val |= SCSMR_SRC_17; break;
			case 19: smr_val |= SCSMR_SRC_19; break;
			case 27: smr_val |= SCSMR_SRC_27; break;
			}
		smr_val |= cks;
		dev_dbg(port->dev,
			"SCR 0x%x SMR 0x%x BRR %u CKS 0x%x DL %u SRR %u\n",
			scr_val, smr_val, brr, sccks, dl, srr);
		serial_port_out(port, SCSCR, scr_val);
		serial_port_out(port, SCSMR, smr_val);
		serial_port_out(port, SCBRR, brr);
		if (sci_getreg(port, HSSRR)->size)
			serial_port_out(port, HSSRR, srr | HSCIF_SRE);

		/* Wait one bit interval */
		udelay((1000000 + (baud - 1)) / baud);
	} else {
		/* Don't touch the bit rate configuration */
		scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
		smr_val |= serial_port_in(port, SCSMR) &
			   (SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
		dev_dbg(port->dev, "SCR 0x%x SMR 0x%x\n", scr_val, smr_val);
		serial_port_out(port, SCSCR, scr_val);
		serial_port_out(port, SCSMR, smr_val);
	}

	sci_init_pins(port, termios->c_cflag);

	reg = sci_getreg(port, SCFCR);
	if (reg->size) {
		unsigned short ctrl = serial_port_in(port, SCFCR);

		if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
			if (termios->c_cflag & CRTSCTS)
				ctrl |= SCFCR_MCE;
			else
				ctrl &= ~SCFCR_MCE;
		}

		/*
		 * As we've done a sci_reset() above, ensure we don't
		 * interfere with the FIFOs while toggling MCE. As the
		 * reset values could still be set, simply mask them out.
		 */
		ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);

		serial_port_out(port, SCFCR, ctrl);
	}

	scr_val |= s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0);
	dev_dbg(port->dev, "SCSCR 0x%x\n", scr_val);
	serial_port_out(port, SCSCR, scr_val);
	if ((srr + 1 == 5) &&
	    (port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
		/*
		 * In asynchronous mode, when the sampling rate is 1/5, first
		 * received data may become invalid on some SCIFA and SCIFB.
		 * To avoid this problem wait more than 1 serial data time (1
		 * bit time x serial data number) after setting SCSCR.RE = 1.
		 */
		udelay(DIV_ROUND_UP(10 * 1000000, baud));
	}

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	/*
	 * Calculate delay for 2 DMA buffers (4 FIFO).
	 * See serial_core.c::uart_update_timeout().
	 * With 10 bits (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above
	 * function calculates 1 jiffie for the data plus 5 jiffies for the
	 * "slop(e)." Then below we calculate 5 jiffies (20ms) for 2 DMA
	 * buffers (4 FIFO sizes), but when performing a faster transfer, the
	 * value obtained by this formula is too small. Therefore, if the value
	 * is smaller than 20ms, use 20ms as the timeout value for DMA.
	 */
	if (s->chan_rx) {
		unsigned int bits;

		/* byte size and parity */
		switch (termios->c_cflag & CSIZE) {
		case CS5:
			bits = 7;
			break;
		case CS6:
			bits = 8;
			break;
		case CS7:
			bits = 9;
			break;
		default:
			bits = 10;
			break;
		}

		if (termios->c_cflag & CSTOPB)
			bits++;
		if (termios->c_cflag & PARENB)
			bits++;
		s->rx_timeout = DIV_ROUND_UP((s->buf_len_rx * 2 * bits * HZ) /
					     (baud / 10), 10);
		dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
			s->rx_timeout * 1000 / HZ, port->timeout);
		if (s->rx_timeout < msecs_to_jiffies(20))
			s->rx_timeout = msecs_to_jiffies(20);
	}
#endif

	if ((termios->c_cflag & CREAD) != 0)
		sci_start_rx(port);

	sci_port_disable(s);
}
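
/*
 * Worked example for the DMA timeout above (illustrative, assuming HZ = 250):
 * 8N1 at 115200 bps gives bits = 10, and a 64-byte FIFO means
 * buf_len_rx = 128, so (128 * 2 * 10 * 250) / (115200 / 10) = 55 and
 * DIV_ROUND_UP(55, 10) = 6 jiffies, i.e. 24 ms. Slower rates scale this up,
 * while anything that would fall below 20 ms is clamped to the 20 ms floor.
 */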
static void sci_pm(struct uart_port *port, unsigned int state,
		   unsigned int oldstate)
{
	struct sci_port *sci_port = to_sci_port(port);

	switch (state) {
	case UART_PM_STATE_OFF:
		sci_port_disable(sci_port);
		break;
	default:
		sci_port_enable(sci_port);
		break;
	}
}
static const char *sci_type(struct uart_port *port)
{
	switch (port->type) {
	case PORT_IRDA:
		return "irda";
	case PORT_SCI:
		return "sci";
	case PORT_SCIF:
		return "scif";
	case PORT_SCIFA:
		return "scifa";
	case PORT_SCIFB:
		return "scifb";
	case PORT_HSCIF:
		return "hscif";
	}

	return NULL;
}
static int sci_remap_port(struct uart_port *port)
{
	struct sci_port *sport = to_sci_port(port);

	/*
	 * Nothing to do if there's already an established membase.
	 */
	if (port->membase)
		return 0;

	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap_nocache(port->mapbase, sport->reg_size);
		if (unlikely(!port->membase)) {
			dev_err(port->dev, "can't remap port#%d\n", port->line);
			return -ENXIO;
		}
	} else {
		/*
		 * For the simple (and majority of) cases where we don't
		 * need to do any remapping, just cast the cookie
		 * directly.
		 */
		port->membase = (void __iomem *)(uintptr_t)port->mapbase;
	}

	return 0;
}
static void sci_release_port(struct uart_port *port)
{
	struct sci_port *sport = to_sci_port(port);

	if (port->flags & UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}

	release_mem_region(port->mapbase, sport->reg_size);
}
static int sci_request_port(struct uart_port *port)
{
	struct resource *res;
	struct sci_port *sport = to_sci_port(port);
	int ret;

	res = request_mem_region(port->mapbase, sport->reg_size,
				 dev_name(port->dev));
	if (unlikely(res == NULL)) {
		dev_err(port->dev, "request_mem_region failed.");
		return -EBUSY;
	}

	ret = sci_remap_port(port);
	if (unlikely(ret != 0)) {
		release_resource(res);
		return ret;
	}

	return 0;
}
*port
, int flags
)
2404 if (flags
& UART_CONFIG_TYPE
) {
2405 struct sci_port
*sport
= to_sci_port(port
);
2407 port
->type
= sport
->cfg
->type
;
2408 sci_request_port(port
);
2412 static int sci_verify_port(struct uart_port
*port
, struct serial_struct
*ser
)
2414 if (ser
->baud_base
< 2400)
2415 /* No paper tape reader for Mitch.. */
static struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.set_termios	= sci_set_termios,
	.pm		= sci_pm,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
{
	const char *clk_names[] = {
		[SCI_FCK] = "fck",
		[SCI_SCK] = "sck",
		[SCI_BRG_INT] = "brg_int",
		[SCI_SCIF_CLK] = "scif_clk",
	};
	struct clk *clk;
	unsigned int i;

	if (sci_port->cfg->type == PORT_HSCIF)
		clk_names[SCI_SCK] = "hsck";

	for (i = 0; i < SCI_NUM_CLKS; i++) {
		clk = devm_clk_get(dev, clk_names[i]);
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(clk) && i == SCI_FCK) {
			/*
			 * "fck" used to be called "sci_ick", and we need to
			 * maintain DT backward compatibility.
			 */
			clk = devm_clk_get(dev, "sci_ick");
			if (PTR_ERR(clk) == -EPROBE_DEFER)
				return -EPROBE_DEFER;

			if (!IS_ERR(clk))
				goto found;

			/*
			 * Not all SH platforms declare a clock lookup entry
			 * for SCI devices, in which case we need to get the
			 * global "peripheral_clk" clock.
			 */
			clk = devm_clk_get(dev, "peripheral_clk");
			if (!IS_ERR(clk))
				goto found;

			dev_err(dev, "failed to get %s (%ld)\n", clk_names[i],
				PTR_ERR(clk));
			return PTR_ERR(clk);
		}

found:
		if (IS_ERR(clk))
			dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
				PTR_ERR(clk));
		else
			dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
				clk, clk);
		sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
	}

	return 0;
}
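/*
 * Illustrative device-tree snippet (placeholder values, not taken from this
 * file): a SCIF node would typically name its clocks to match the lookup
 * above, e.g.
 *
 *	clocks = <&scif0_clk>, <&scif_ext_clk>;
 *	clock-names = "fck", "scif_clk";
 *
 * Only "fck" is mandatory; "sck", "brg_int" and "scif_clk" are optional and
 * simply end up as NULL entries in sci_port->clks[] when absent.
 */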
static int sci_init_single(struct platform_device *dev,
			   struct sci_port *sci_port, unsigned int index,
			   struct plat_sci_port *p, bool early)
{
	struct uart_port *port = &sci_port->port;
	const struct resource *res;
	unsigned int i;
	int ret;

	sci_port->cfg	= p;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENOMEM;

	port->mapbase = res->start;
	sci_port->reg_size = resource_size(res);

	for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
		sci_port->irqs[i] = platform_get_irq(dev, i);

	/* The SCI generates several interrupts. They can be muxed together or
	 * connected to different interrupt lines. In the muxed case only one
	 * interrupt resource is specified. In the non-muxed case three or four
	 * interrupt resources are specified, as the BRI interrupt is optional.
	 */
	if (sci_port->irqs[0] < 0)
		return -ENXIO;

	if (sci_port->irqs[1] < 0) {
		sci_port->irqs[1] = sci_port->irqs[0];
		sci_port->irqs[2] = sci_port->irqs[0];
		sci_port->irqs[3] = sci_port->irqs[0];
	}
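	/*
	 * Illustrative example (assumed interrupt numbers, for documentation
	 * only): a muxed controller that lists a single interrupt, say 23,
	 * ends up with irqs[] = { 23, 23, 23, 23 } after the fixup above,
	 * whereas a non-muxed SCIF listing ERI/RXI/TXI as 52/53/54 keeps them
	 * separate and leaves the optional BRI slot at a negative value.
	 */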
	if (p->regtype == SCIx_PROBE_REGTYPE) {
		ret = sci_probe_regmap(p);
		if (unlikely(ret))
			return ret;
	}

	switch (p->type) {
	case PORT_SCIFB:
		port->fifosize = 256;
		sci_port->overrun_reg = SCxSR;
		sci_port->overrun_mask = SCIFA_ORER;
		sci_port->sampling_rate_mask = SCI_SR_SCIFAB;
		break;
	case PORT_HSCIF:
		port->fifosize = 128;
		sci_port->overrun_reg = SCLSR;
		sci_port->overrun_mask = SCLSR_ORER;
		sci_port->sampling_rate_mask = SCI_SR_RANGE(8, 32);
		break;
	case PORT_SCIFA:
		port->fifosize = 64;
		sci_port->overrun_reg = SCxSR;
		sci_port->overrun_mask = SCIFA_ORER;
		sci_port->sampling_rate_mask = SCI_SR_SCIFAB;
		break;
	case PORT_SCIF:
		port->fifosize = 16;
		if (p->regtype == SCIx_SH7705_SCIF_REGTYPE) {
			sci_port->overrun_reg = SCxSR;
			sci_port->overrun_mask = SCIFA_ORER;
			sci_port->sampling_rate_mask = SCI_SR(16);
		} else {
			sci_port->overrun_reg = SCLSR;
			sci_port->overrun_mask = SCLSR_ORER;
			sci_port->sampling_rate_mask = SCI_SR(32);
		}
		break;
	default:
		port->fifosize = 1;
		sci_port->overrun_reg = SCxSR;
		sci_port->overrun_mask = SCI_ORER;
		sci_port->sampling_rate_mask = SCI_SR(32);
		break;
	}
	/* SCIFA on sh7723 and sh7724 need a custom sampling rate that doesn't
	 * match the SoC datasheet, this should be investigated. Let platform
	 * data override the sampling rate for now.
	 */
	if (p->sampling_rate)
		sci_port->sampling_rate_mask = SCI_SR(p->sampling_rate);

	if (!early) {
		ret = sci_init_clocks(sci_port, &dev->dev);
		if (ret < 0)
			return ret;

		port->dev = &dev->dev;

		pm_runtime_enable(&dev->dev);
	}

	sci_port->break_timer.data = (unsigned long)sci_port;
	sci_port->break_timer.function = sci_break_timer;
	init_timer(&sci_port->break_timer);

	/*
	 * Establish some sensible defaults for the error detection.
	 */
	if (p->type == PORT_SCI) {
		sci_port->error_mask = SCI_DEFAULT_ERROR_MASK;
		sci_port->error_clear = SCI_ERROR_CLEAR;
	} else {
		sci_port->error_mask = SCIF_DEFAULT_ERROR_MASK;
		sci_port->error_clear = SCIF_ERROR_CLEAR;
	}

	/*
	 * Make the error mask inclusive of overrun detection, if
	 * supported.
	 */
	if (sci_port->overrun_reg == SCxSR) {
		sci_port->error_mask |= sci_port->overrun_mask;
		sci_port->error_clear &= ~sci_port->overrun_mask;
	}

	port->type		= p->type;
	port->flags		= UPF_FIXED_PORT | p->flags;
	port->regshift		= p->regshift;

	/*
	 * The UART port needs an IRQ value, so we peg this to the RX IRQ
	 * for the multi-IRQ ports, which is where we are primarily
	 * concerned with the shutdown path synchronization.
	 *
	 * For the muxed case there's nothing more to do.
	 */
	port->irq		= sci_port->irqs[SCIx_RXI_IRQ];
	port->irqflags		= 0;

	port->serial_in		= sci_serial_in;
	port->serial_out	= sci_serial_out;

	if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0)
		dev_dbg(port->dev, "DMA tx %d, rx %d\n",
			p->dma_slave_tx, p->dma_slave_rx);

	return 0;
}
static void sci_cleanup_single(struct sci_port *port)
{
	pm_runtime_disable(port->port.dev);
}
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
    defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}
/*
 * Print a string to the serial port trying not to disturb
 * any possible real use of the port...
 */
static void serial_console_write(struct console *co, const char *s,
				 unsigned count)
{
	struct sci_port *sci_port = &sci_ports[co->index];
	struct uart_port *port = &sci_port->port;
	unsigned short bits, ctrl, ctrl_temp;
	unsigned long flags;
	int locked = 1;

	local_irq_save(flags);
#if defined(SUPPORT_SYSRQ)
	if (port->sysrq)
		locked = 0;
	else
#endif
	if (oops_in_progress)
		locked = spin_trylock(&port->lock);
	else
		spin_lock(&port->lock);

	/* first save SCSCR then disable interrupts, keep clock source */
	ctrl = serial_port_in(port, SCSCR);
	ctrl_temp = (sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
		    (ctrl & (SCSCR_CKE1 | SCSCR_CKE0));
	serial_port_out(port, SCSCR, ctrl_temp);

	uart_console_write(port, s, count, serial_console_putchar);

	/* wait until fifo is empty and last bit has been transmitted */
	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
	while ((serial_port_in(port, SCxSR) & bits) != bits)
		cpu_relax();

	/* restore the SCSCR */
	serial_port_out(port, SCSCR, ctrl);

	if (locked)
		spin_unlock(&port->lock);
	local_irq_restore(flags);
}
static int serial_console_setup(struct console *co, char *options)
{
	struct sci_port *sci_port;
	struct uart_port *port;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Refuse to handle any bogus ports.
	 */
	if (co->index < 0 || co->index >= SCI_NPORTS)
		return -ENODEV;

	sci_port = &sci_ports[co->index];
	port = &sci_port->port;

	/*
	 * Refuse to handle uninitialized ports.
	 */
	if (!port->ops)
		return -ENODEV;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0))
		return ret;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, co, baud, parity, bits, flow);
}
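/*
 * Typical usage (illustrative, not taken from this file): booting with
 * "console=ttySC0,115200" selects port 0 of this driver and hands the option
 * string "115200" to serial_console_setup() above, where uart_parse_options()
 * splits it into baud/parity/bits/flow before uart_set_options() programs the
 * port.
 */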
static struct console serial_console = {
	.name		= "ttySC",
	.device		= uart_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sci_uart_driver,
};

static struct console early_serial_console = {
	.name		= "early_ttySC",
	.write		= serial_console_write,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

static char early_serial_buf[32];
static int sci_probe_earlyprintk(struct platform_device *pdev)
{
	struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev);

	if (early_serial_console.data)
		return -EEXIST;

	early_serial_console.index = pdev->id;

	sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true);

	serial_console_setup(&early_serial_console, early_serial_buf);

	if (!strstr(early_serial_buf, "keep"))
		early_serial_console.flags |= CON_BOOT;

	register_console(&early_serial_console);

	return 0;
}
#define SCI_CONSOLE	(&serial_console)

#else

static inline int sci_probe_earlyprintk(struct platform_device *pdev)
{
	return -EINVAL;
}

#define SCI_CONSOLE	NULL

#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE || CONFIG_SERIAL_SH_SCI_EARLYCON */
static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";

static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};
static int sci_remove(struct platform_device *dev)
{
	struct sci_port *port = platform_get_drvdata(dev);

	uart_remove_one_port(&sci_uart_driver, &port->port);

	sci_cleanup_single(port);

	return 0;
}
#define SCI_OF_DATA(type, regtype)	(void *)((type) << 16 | (regtype))
#define SCI_OF_TYPE(data)		((unsigned long)(data) >> 16)
#define SCI_OF_REGTYPE(data)		((unsigned long)(data) & 0xffff)
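/*
 * Illustrative round trip (for documentation only): an of_device_id entry
 * built with SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE) stores the
 * port type in the upper 16 bits of the cookie and the register layout in
 * the lower 16 bits, so sci_parse_dt() can recover both with
 * SCI_OF_TYPE(match->data) and SCI_OF_REGTYPE(match->data).
 */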
static const struct of_device_id of_sci_match[] = {
	/* SoC-specific types */
	{
		.compatible = "renesas,scif-r7s72100",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
	},
	/* Family-specific types */
	{
		.compatible = "renesas,rcar-gen1-scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
	}, {
		.compatible = "renesas,rcar-gen2-scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
	}, {
		.compatible = "renesas,rcar-gen3-scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
	},
	/* Generic types */
	{
		.compatible = "renesas,scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_REGTYPE),
	}, {
		.compatible = "renesas,scifa",
		.data = SCI_OF_DATA(PORT_SCIFA, SCIx_SCIFA_REGTYPE),
	}, {
		.compatible = "renesas,scifb",
		.data = SCI_OF_DATA(PORT_SCIFB, SCIx_SCIFB_REGTYPE),
	}, {
		.compatible = "renesas,hscif",
		.data = SCI_OF_DATA(PORT_HSCIF, SCIx_HSCIF_REGTYPE),
	}, {
		.compatible = "renesas,sci",
		.data = SCI_OF_DATA(PORT_SCI, SCIx_SCI_REGTYPE),
	}, {
		/* Terminator */
	},
};
MODULE_DEVICE_TABLE(of, of_sci_match);
static struct plat_sci_port *
sci_parse_dt(struct platform_device *pdev, unsigned int *dev_id)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *match;
	struct plat_sci_port *p;
	int id;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return NULL;

	match = of_match_node(of_sci_match, np);
	if (!match)
		return NULL;

	p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
	if (!p)
		return NULL;

	/* Get the line number from the aliases node. */
	id = of_alias_get_id(np, "serial");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
		return NULL;
	}

	*dev_id = id;

	p->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
	p->type = SCI_OF_TYPE(match->data);
	p->regtype = SCI_OF_REGTYPE(match->data);
	p->scscr = SCSCR_RE | SCSCR_TE;

	return p;
}
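/*
 * Illustrative device-tree node (placeholder values, for documentation only):
 *
 *	aliases {
 *		serial0 = &scif0;
 *	};
 *
 *	scif0: serial@e6e60000 {
 *		compatible = "renesas,rcar-gen2-scif", "renesas,scif";
 *		reg = <0 0xe6e60000 0 64>;
 *		interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&scif0_clk>;
 *		clock-names = "fck";
 *	};
 *
 * sci_parse_dt() matches the compatible string against of_sci_match[] above
 * and takes the port number from the "serial0" alias.
 */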
static int sci_probe_single(struct platform_device *dev,
			    unsigned int index,
			    struct plat_sci_port *p,
			    struct sci_port *sciport)
{
	int ret;

	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port %d when only %d are available\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return -EINVAL;
	}

	ret = sci_init_single(dev, sciport, index, p, false);
	if (ret)
		return ret;

	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
	if (ret) {
		sci_cleanup_single(sciport);
		return ret;
	}

	return 0;
}
static int sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p;
	struct sci_port *sp;
	unsigned int dev_id;
	int ret;

	/*
	 * If we've come here via earlyprintk initialization, head off to
	 * the special early probe. We don't have sufficient device state
	 * to make it beyond this yet.
	 */
	if (is_early_platform_device(dev))
		return sci_probe_earlyprintk(dev);

	if (dev->dev.of_node) {
		p = sci_parse_dt(dev, &dev_id);
		if (p == NULL)
			return -EINVAL;
	} else {
		p = dev->dev.platform_data;
		if (p == NULL) {
			dev_err(&dev->dev, "no platform data supplied\n");
			return -EINVAL;
		}

		dev_id = dev->id;
	}

	sp = &sci_ports[dev_id];
	platform_set_drvdata(dev, sp);

	ret = sci_probe_single(dev, dev_id, p, sp);
	if (ret)
		return ret;

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	return 0;
}
static __maybe_unused int sci_suspend(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_suspend_port(&sci_uart_driver, &sport->port);

	return 0;
}

static __maybe_unused int sci_resume(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_resume_port(&sci_uart_driver, &sport->port);

	return 0;
}

static SIMPLE_DEV_PM_OPS(sci_dev_pm_ops, sci_suspend, sci_resume);
static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.pm	= &sci_dev_pm_ops,
		.of_match_table = of_match_ptr(of_sci_match),
	},
};
static int __init sci_init(void)
{
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&sci_uart_driver);
	if (likely(ret == 0)) {
		ret = platform_driver_register(&sci_driver);
		if (unlikely(ret != 0))
			uart_unregister_driver(&sci_uart_driver);
	}

	return ret;
}

static void __exit sci_exit(void)
{
	platform_driver_unregister(&sci_driver);
	uart_unregister_driver(&sci_uart_driver);
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
early_platform_init_buffer("earlyprintk", &sci_driver,
			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
static struct plat_sci_port port_cfg;

static int __init early_console_setup(struct earlycon_device *device,
				      int type)
{
	if (!device->port.membase)
		return -ENODEV;

	device->port.serial_in = sci_serial_in;
	device->port.serial_out = sci_serial_out;
	device->port.type = type;
	memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
	sci_ports[0].cfg = &port_cfg;
	sci_ports[0].cfg->type = type;
	sci_probe_regmap(sci_ports[0].cfg);
	port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR) |
			 SCSCR_RE | SCSCR_TE;
	sci_serial_out(&sci_ports[0].port, SCSCR, port_cfg.scscr);

	device->con->write = serial_console_write;
	return 0;
}
static int __init sci_early_console_setup(struct earlycon_device *device,
					  const char *opt)
{
	return early_console_setup(device, PORT_SCI);
}
static int __init scif_early_console_setup(struct earlycon_device *device,
					   const char *opt)
{
	return early_console_setup(device, PORT_SCIF);
}
static int __init scifa_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	return early_console_setup(device, PORT_SCIFA);
}
static int __init scifb_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	return early_console_setup(device, PORT_SCIFB);
}
static int __init hscif_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	return early_console_setup(device, PORT_HSCIF);
}

OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup);
OF_EARLYCON_DECLARE(scif, "renesas,scif", scif_early_console_setup);
OF_EARLYCON_DECLARE(scifa, "renesas,scifa", scifa_early_console_setup);
OF_EARLYCON_DECLARE(scifb, "renesas,scifb", scifb_early_console_setup);
OF_EARLYCON_DECLARE(hscif, "renesas,hscif", hscif_early_console_setup);
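/*
 * Illustrative usage (assumed values, for documentation only): with a DT
 * whose /chosen/stdout-path points at a "renesas,scif" node, booting with
 * "earlycon" on the kernel command line picks scif_early_console_setup()
 * above; alternatively, something like "earlycon=scif,0xe6e60000" names the
 * match and the register base explicitly.
 */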
#endif /* CONFIG_SERIAL_SH_SCI_EARLYCON */

module_init(sci_init);
module_exit(sci_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("SuperH (H)SCI(F) serial driver");