/*
 *  Driver for AMBA serial ports
 *
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  Copyright 1999 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */
#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	unsigned int		ifls;
	unsigned int		lcrh_tx;
	unsigned int		lcrh_rx;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}
static struct vendor_data vendor_arm = {
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.lcrh_tx		= UART011_LCRH,
	.lcrh_rx		= UART011_LCRH,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.get_fifosize		= get_fifosize_arm,
};
static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}
static struct vendor_data vendor_st = {
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.lcrh_tx		= ST_UART011_LCRH_TX,
	.lcrh_rx		= ST_UART011_LCRH_RX,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.get_fifosize		= get_fifosize_st,
};
/* Deals with DMA transactions */

struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int last_residue;
	unsigned long last_jiffies;
	bool auto_poll_rate;
	unsigned int poll_rate;
	unsigned int poll_timeout;
};
struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};
/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		lcrh_tx;	/* vendor-specific */
	unsigned int		lcrh_rx;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	struct delayed_work	tx_softirq_work;
	bool			autorts;
	unsigned int		tx_irq_seen;	/* 0=none, 1=1, 2=2 or more */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status;
	unsigned int ch, flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = readw(uap->port.membase + UART01x_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = readw(uap->port.membase + UART01x_DR) |
			UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}
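/*
 * For reference: the DR read above returns the received character in
 * bits [7:0] and the per-character error flags in bits [11:8] -
 * UART011_DR_FE (1 << 8), _PE (1 << 9), _BE (1 << 10) and _OE (1 << 11)
 * - which is why UART_DR_ERROR can be tested directly on the value
 * read.  UART_DUMMY_DR_RX (1 << 16) sits above all hardware bits, so
 * OR-ing it in lets pl011_set_termios() discard every character via
 * ignore_status_mask when CREAD is clear.
 */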
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}
static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase + UART01x_DR,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
						plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase + UART01x_DR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing
		 * otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					"RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * The poll rate defaults to 100 ms if not
				 * specified. This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* poll_timeout defaults to 3 secs if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
					"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
					"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}
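/*
 * A hypothetical device-tree fragment wiring up the optional RX polling
 * properties parsed above (node name and addresses are illustrative
 * only; "auto-poll", "poll-rate-ms" and "poll-timeout-ms" are the
 * property names this function actually reads):
 *
 *	uart0: serial@101f1000 {
 *		compatible = "arm,pl011", "arm,primecell";
 *		reg = <0x101f1000 0x1000>;
 *		auto-poll;
 *		poll-rate-ms = <100>;
 *		poll-timeout-ms = <3000>;
 *	};
 */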
static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}
/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);
/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}
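/*
 * Worked example of the wrap-around copy above, assuming
 * UART_XMIT_SIZE == 4096: with xmit->tail == 4000 and xmit->head == 100
 * there are (100 - 4000) & 4095 == 196 chars pending, so (after the
 * one-char XON bodge drops count to 195) first == 4096 - 4000 == 96
 * bytes are copied from the tail of the ring and second == count -
 * first bytes from its start, giving the DMA engine one contiguous
 * buffer.
 */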
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}
	return false;
}
/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}
/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				writew(uap->im, uap->port.membase +
				       UART011_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			writew(uap->dmacr,
				       uap->port.membase + UART011_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	writew(dmacr, uap->port.membase + UART011_DMACR);

	return true;
}
/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}
static void pl011_dma_rx_callback(void *data);
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);

	return 0;
}
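/*
 * Note the hand-over above: once the descriptor is in flight, RXDMAE in
 * DMACR routes received data to the DMA engine and the RX interrupt is
 * masked, so the FIFO no longer raises UART011_RXIS.  The receive
 * timeout interrupt (UART011_RTIS) stays enabled, and is what later
 * flushes a partially filled buffer via pl011_dma_rx_irq().
 */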
/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick the remaining data from the DMA */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_buf() tries to take as many chars
		 * as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
		       uap->port.membase + UART011_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}
/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}
/*
 * Timer handler for Rx DMA polling.
 * On every poll it checks the residue in the DMA buffer and transfers
 * data to the TTY. Also, last_residue is updated for the next poll.
 */
static void pl011_dma_rx_poll(unsigned long args)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)args;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			       uap->port.membase + ST_UART011_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			init_timer(&(uap->dmarx.timer));
			uap->dmarx.timer.function = pl011_dma_rx_poll;
			uap->dmarx.timer.data = (unsigned long)uap;
			mod_timer(&uap->dmarx.timer,
				jiffies +
				msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}
static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL

#endif
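/*
 * pl011_dma_flush_buffer is defined as NULL here (rather than as an
 * empty stub) so that, without CONFIG_DMA_ENGINE, amba_pl011_pops
 * carries no ->flush_buffer callback at all and the serial core skips
 * the call instead of taking a pointless indirect branch.
 */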
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	pl011_dma_tx_stop(uap);
}
static bool pl011_tx_chars(struct uart_amba_port *uap);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	uap->im |= UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	if (!uap->tx_irq_seen)
		pl011_tx_chars(uap);
}
static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}
static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	writew(uap->im, uap->port.membase + UART011_IMSC);

	pl011_dma_rx_stop(uap);
}
static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
}
static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			writew(uap->im, uap->port.membase + UART011_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					jiffies +
					msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}
/*
 * Transmit a character
 *
 * Returns true if the character was successfully queued to the FIFO.
 * Returns false otherwise.
 */
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
{
	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	writew(c, uap->port.membase + UART01x_DR);
	uap->port.icount.tx++;

	return true;
}
/* Returns true if tx interrupts have to be (kept) enabled  */
static bool pl011_tx_chars(struct uart_amba_port *uap)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	if (unlikely(uap->tx_irq_seen < 2))
		/*
		 * Initial FIFO fill level unknown: we must check TXFF
		 * after each write, so just try to fill up the FIFO.
		 */
		count = uap->fifosize;
	else /* tx_irq_seen >= 2 */
		/*
		 * FIFO initially at least half-empty, so we can simply
		 * write half the FIFO without polling TXFF.
		 *
		 * Note: the *first* TX IRQ can still race with
		 * pl011_start_tx_pio(), which can result in the FIFO
		 * being fuller than expected in that case.
		 */
		count = uap->fifosize >> 1;

	/*
	 * If the FIFO is full we're guaranteed a TX IRQ at some later point,
	 * and can't transmit immediately in any case:
	 */
	if (unlikely(uap->tx_irq_seen < 2 &&
		     readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF))
		return false;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char))
			return true;
		uap->port.x_char = 0;
		--count;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	while (count-- > 0 && pl011_tx_char(uap, xmit->buf[xmit->tail])) {
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	if (unlikely(!uap->tx_irq_seen))
		schedule_delayed_work(&uap->tx_softirq_work, uap->port.timeout);

	return true;
}
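/*
 * Timeline sketch of the tx_irq_seen heuristic used above (counts are
 * advanced by pl011_tx_irq_seen()): at startup tx_irq_seen == 0, so
 * transmission is driven by pl011_tx_softirq() and every write polls
 * TXFF.  The first TX IRQ bumps the count and cancels the softirq
 * fallback; from the second IRQ onwards (tx_irq_seen == 2) the FIFO is
 * known to be at least half empty on entry, so half a FIFO's worth of
 * characters can be written without polling.
 */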
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
static void pl011_tx_softirq(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct uart_amba_port *uap =
		container_of(dwork, struct uart_amba_port, tx_softirq_work);

	spin_lock_irq(&uap->port.lock);
	while (pl011_tx_chars(uap)) ;
	spin_unlock_irq(&uap->port.lock);
}
static void pl011_tx_irq_seen(struct uart_amba_port *uap)
{
	if (likely(uap->tx_irq_seen > 1))
		return;

	uap->tx_irq_seen++;
	if (uap->tx_irq_seen < 2) /* first TX IRQ */
		cancel_delayed_work(&uap->tx_softirq_work);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;
	unsigned int dummy_read;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = readw(uap->port.membase + UART011_MIS);
	if (status) {
		do {
			if (uap->vendor->cts_event_workaround) {
				/* workaround to make sure that all bits are unlocked.. */
				writew(0x00, uap->port.membase + UART011_ICR);

				/*
				 * WA: introduce 26ns(1 uart clk) delay before W1C;
				 * single apb access will incur 2 pclk(133.12Mhz) delay,
				 * so add 2 dummy reads
				 */
				dummy_read = readw(uap->port.membase + UART011_ICR);
				dummy_read = readw(uap->port.membase + UART011_ICR);
			}

			writew(status & ~(UART011_TXIS|UART011_RTIS|
					  UART011_RXIS),
			       uap->port.membase + UART011_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS) {
				pl011_tx_irq_seen(uap);
				pl011_tx_chars(uap);
			}

			if (pass_counter-- == 0)
				break;

			status = readw(uap->port.membase + UART011_MIS);
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status = readw(uap->port.membase + UART01x_FR);
	return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
}
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = readw(uap->port.membase + UART01x_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
	TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
	TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = readw(uap->port.membase + UART011_CR);

#define	TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (uap->autorts) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	writew(cr, uap->port.membase + UART011_CR);
}
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned char __iomem *regs = uap->port.membase;

	writew(readw(regs + UART011_MIS), regs + UART011_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = readw(uap->port.membase + UART01x_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return readw(uap->port.membase + UART01x_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
			 unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();

	writew(ch, uap->port.membase + UART01x_DR);
}

#endif /* CONFIG_CONSOLE_POLL */
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);

	/*
	 * Save interrupts enable mask, and enable RX interrupts in case if
	 * the interrupt is used for NMI entry.
	 */
	uap->im = readw(uap->port.membase + UART011_IMSC);
	writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}
*uap
, unsigned int lcr_h
)
1607 writew(lcr_h
, uap
->port
.membase
+ uap
->lcrh_rx
);
1608 if (uap
->lcrh_rx
!= uap
->lcrh_tx
) {
1611 * Wait 10 PCLKs before writing LCRH_TX register,
1612 * to get this delay write read only register 10 times
1614 for (i
= 0; i
< 10; ++i
)
1615 writew(0xff, uap
->port
.membase
+ UART011_MIS
);
1616 writew(lcr_h
, uap
->port
.membase
+ uap
->lcrh_tx
);
1620 static int pl011_startup(struct uart_port
*port
)
1622 struct uart_amba_port
*uap
=
1623 container_of(port
, struct uart_amba_port
, port
);
1627 retval
= pl011_hwinit(port
);
1631 writew(uap
->im
, uap
->port
.membase
+ UART011_IMSC
);
1636 retval
= request_irq(uap
->port
.irq
, pl011_int
, 0, "uart-pl011", uap
);
1640 writew(uap
->vendor
->ifls
, uap
->port
.membase
+ UART011_IFLS
);
1642 /* Assume that TX IRQ doesn't work until we see one: */
1643 uap
->tx_irq_seen
= 0;
1645 spin_lock_irq(&uap
->port
.lock
);
1647 /* restore RTS and DTR */
1648 cr
= uap
->old_cr
& (UART011_CR_RTS
| UART011_CR_DTR
);
1649 cr
|= UART01x_CR_UARTEN
| UART011_CR_RXE
| UART011_CR_TXE
;
1650 writew(cr
, uap
->port
.membase
+ UART011_CR
);
1652 spin_unlock_irq(&uap
->port
.lock
);
1655 * initialise the old status of the modem signals
1657 uap
->old_status
= readw(uap
->port
.membase
+ UART01x_FR
) & UART01x_FR_MODEM_ANY
;
1660 pl011_dma_startup(uap
);
1663 * Finally, enable interrupts, only timeouts when using DMA
1664 * if initial RX DMA job failed, start in interrupt mode
1667 spin_lock_irq(&uap
->port
.lock
);
1668 /* Clear out any spuriously appearing RX interrupts */
1669 writew(UART011_RTIS
| UART011_RXIS
,
1670 uap
->port
.membase
+ UART011_ICR
);
1671 uap
->im
= UART011_RTIM
;
1672 if (!pl011_dma_rx_running(uap
))
1673 uap
->im
|= UART011_RXIM
;
1674 writew(uap
->im
, uap
->port
.membase
+ UART011_IMSC
);
1675 spin_unlock_irq(&uap
->port
.lock
);
1680 clk_disable_unprepare(uap
->clk
);
static void pl011_shutdown_channel(struct uart_amba_port *uap,
					unsigned int lcrh)
{
	unsigned long val;

	val = readw(uap->port.membase + lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	writew(val, uap->port.membase + lcrh);
}
*port
)
1696 struct uart_amba_port
*uap
=
1697 container_of(port
, struct uart_amba_port
, port
);
1700 cancel_delayed_work_sync(&uap
->tx_softirq_work
);
1703 * disable all interrupts
1705 spin_lock_irq(&uap
->port
.lock
);
1707 writew(uap
->im
, uap
->port
.membase
+ UART011_IMSC
);
1708 writew(0xffff, uap
->port
.membase
+ UART011_ICR
);
1709 spin_unlock_irq(&uap
->port
.lock
);
1711 pl011_dma_shutdown(uap
);
1714 * Free the interrupt
1716 free_irq(uap
->port
.irq
, uap
);
1720 * disable the port. It should not disable RTS and DTR.
1721 * Also RTS and DTR state should be preserved to restore
1722 * it during startup().
1724 uap
->autorts
= false;
1725 spin_lock_irq(&uap
->port
.lock
);
1726 cr
= readw(uap
->port
.membase
+ UART011_CR
);
1728 cr
&= UART011_CR_RTS
| UART011_CR_DTR
;
1729 cr
|= UART01x_CR_UARTEN
| UART011_CR_TXE
;
1730 writew(cr
, uap
->port
.membase
+ UART011_CR
);
1731 spin_unlock_irq(&uap
->port
.lock
);
1734 * disable break condition and fifos
1736 pl011_shutdown_channel(uap
, uap
->lcrh_rx
);
1737 if (uap
->lcrh_rx
!= uap
->lcrh_tx
)
1738 pl011_shutdown_channel(uap
, uap
->lcrh_tx
);
1741 * Shut down the clock producer
1743 clk_disable_unprepare(uap
->clk
);
1744 /* Optionally let pins go into sleep states */
1745 pinctrl_pm_select_sleep_state(port
->dev
);
1747 if (dev_get_platdata(uap
->port
.dev
)) {
1748 struct amba_pl011_data
*plat
;
1750 plat
= dev_get_platdata(uap
->port
.dev
);
1755 if (uap
->port
.ops
->flush_buffer
)
1756 uap
->port
.ops
->flush_buffer(port
);
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		     struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = readw(port->membase + UART011_CR);
	writew(0, port->membase + UART011_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	writew(quot & 0x3f, port->membase + UART011_FBRD);
	writew(quot >> 6, port->membase + UART011_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
	 * UART011_FBRD & UART011_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	writew(old_cr, port->membase + UART011_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}
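/*
 * Worked divisor example for the IBRD/FBRD programming above, assuming
 * uartclk == 24 MHz and 115200 baud on a non-oversampling part:
 * quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) == 833, giving
 * IBRD == 833 >> 6 == 13 and FBRD == 833 & 0x3f == 1, i.e. a divider
 * of 13 + 1/64 and an actual rate of 24000000 / (16 * 13.015625),
 * about 115246 baud (~0.04% error).
 */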
static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}
/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}
/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}
/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}
= {
1955 .tx_empty
= pl011_tx_empty
,
1956 .set_mctrl
= pl011_set_mctrl
,
1957 .get_mctrl
= pl011_get_mctrl
,
1958 .stop_tx
= pl011_stop_tx
,
1959 .start_tx
= pl011_start_tx
,
1960 .stop_rx
= pl011_stop_rx
,
1961 .enable_ms
= pl011_enable_ms
,
1962 .break_ctl
= pl011_break_ctl
,
1963 .startup
= pl011_startup
,
1964 .shutdown
= pl011_shutdown
,
1965 .flush_buffer
= pl011_dma_flush_buffer
,
1966 .set_termios
= pl011_set_termios
,
1968 .release_port
= pl011_release_port
,
1969 .request_port
= pl011_request_port
,
1970 .config_port
= pl011_config_port
,
1971 .verify_port
= pl011_verify_port
,
1972 #ifdef CONFIG_CONSOLE_POLL
1973 .poll_init
= pl011_hwinit
,
1974 .poll_get_char
= pl011_get_poll_char
,
1975 .poll_put_char
= pl011_put_poll_char
,
1979 static struct uart_amba_port
*amba_ports
[UART_NR
];
#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();
	writew(ch, uap->port.membase + UART01x_DR);
}
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int status, old_cr, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 *	First save the CR then disable the interrupts
	 */
	old_cr = readw(uap->port.membase + UART011_CR);
	new_cr = old_cr & ~UART011_CR_CTSEN;
	new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(new_cr, uap->port.membase + UART011_CR);

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 *	Finally, wait for transmitter to become empty
	 *	and restore the TCR
	 */
	do {
		status = readw(uap->port.membase + UART01x_FR);
	} while (status & UART01x_FR_BUSY);
	writew(old_cr, uap->port.membase + UART011_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			     int *parity, int *bits)
{
	if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = readw(uap->port.membase + uap->lcrh_tx);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = readw(uap->port.membase + UART011_IBRD);
		fbrd = readw(uap->port.membase + UART011_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (readw(uap->port.membase + UART011_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		pl011_console_get_options(uap, &baud, &parity, &bits);

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)
static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		;
	writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		;
}
*con
, const char *s
, unsigned n
)
2141 struct earlycon_device
*dev
= con
->data
;
2143 uart_console_write(&dev
->port
, s
, n
, pl011_putc
);
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	return 0;
}
EARLYCON_DECLARE(pl011, pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
#else
#define AMBA_CONSOLE	NULL
#endif
static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};
, struct device
*dev
)
2174 struct device_node
*np
;
2175 static bool seen_dev_with_alias
= false;
2176 static bool seen_dev_without_alias
= false;
2179 if (!IS_ENABLED(CONFIG_OF
))
2186 ret
= of_alias_get_id(np
, "serial");
2187 if (IS_ERR_VALUE(ret
)) {
2188 seen_dev_without_alias
= true;
2191 seen_dev_with_alias
= true;
2192 if (ret
>= ARRAY_SIZE(amba_ports
) || amba_ports
[ret
] != NULL
) {
2193 dev_warn(dev
, "requested serial port %d not available.\n", ret
);
2198 if (seen_dev_with_alias
&& seen_dev_without_alias
)
2199 dev_warn(dev
, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	void __iomem *base;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			break;

	if (i == ARRAY_SIZE(amba_ports))
		return -EBUSY;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (uap == NULL)
		return -ENOMEM;

	i = pl011_probe_dt_alias(i, &dev->dev);

	base = devm_ioremap(&dev->dev, dev->res.start,
			    resource_size(&dev->res));
	if (!base)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->vendor = vendor;
	uap->lcrh_rx = vendor->lcrh_rx;
	uap->lcrh_tx = vendor->lcrh_tx;
	uap->old_cr = 0;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.dev = &dev->dev;
	uap->port.mapbase = dev->res.start;
	uap->port.membase = base;
	uap->port.iotype = UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.fifosize = uap->fifosize;
	uap->port.ops = &amba_pl011_pops;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = i;
	INIT_DELAYED_WORK(&uap->tx_softirq_work, pl011_tx_softirq);

	/* Ensure interrupts from this UART are masked and cleared */
	writew(0, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	amba_ports[i] = uap;

	amba_set_drvdata(dev, uap);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(&dev->dev,
				"Failed to register AMBA-PL011 driver\n");
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret) {
		amba_ports[i] = NULL;
		uart_unregister_driver(&amba_reg);
	}

	return ret;
}
static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);
	bool busy = false;
	int i;

	uart_remove_one_port(&amba_reg, &uap->port);

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;

	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
	return 0;
}
2299 static int pl011_suspend(struct device
*dev
)
2301 struct uart_amba_port
*uap
= dev_get_drvdata(dev
);
2306 return uart_suspend_port(&amba_reg
, &uap
->port
);
2309 static int pl011_resume(struct device
*dev
)
2311 struct uart_amba_port
*uap
= dev_get_drvdata(dev
);
2316 return uart_resume_port(&amba_reg
, &uap
->port
);
2320 static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops
, pl011_suspend
, pl011_resume
);
static struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);
static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};
static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	return amba_driver_register(&pl011_driver);
}
static void __exit pl011_exit(void)
{
	amba_driver_unregister(&pl011_driver);
}
/*
 * While this can be a module, if builtin it's most likely the console
 * So let's leave module_exit but move module_init to an earlier place
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);
MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");