2 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
3 * GT64260, MV64340, MV64360, GT96100, ... ).
5 * Author: Mark A. Greer <mgreer@mvista.com>
7 * Based on an old MPSC driver that was in the linuxppc tree. It appears to
8 * have been created by Chris Zankel (formerly of MontaVista) but there
9 * is no proper Copyright so I'm not sure. Apparently, parts were also
10 * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
13 * 2004 (c) MontaVista, Software, Inc. This file is licensed under
14 * the terms of the GNU General Public License version 2. This program
15 * is licensed "as is" without any warranty of any kind, whether express
19 * The MPSC interface is much like a typical network controller's interface.
20 * That is, you set up separate rings of descriptors for transmitting and
21 * receiving data. There is also a pool of buffers (one buffer per
22 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
25 * The MPSC requires two other controllers to be able to work. The Baud Rate
26 * Generator (BRG) provides a clock at programmable frequencies which determines
27 * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the
28 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
29 * MPSC. It is actually the SDMA interrupt that the driver uses to keep the
30 * transmit and receive "engines" going (i.e., indicate data has been
31 * transmitted or received).
35 * 1) Some chips have an erratum where several regs cannot be
36 * read. To work around that, we keep a local copy of those regs in
39 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
40 * accesses system mem with coherency enabled. For that reason, the driver
41 * assumes that coherency for that ctlr has been disabled. This means
42 * that when in a cache coherent system, the driver has to manually manage
43 * the data cache on the areas that it touches because the dma_* macro are
46 * 3) There is an erratum (on PPC) where you can't use the instruction to do
47 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
48 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
50 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
54 #if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
58 #include <linux/tty.h>
59 #include <linux/tty_flip.h>
60 #include <linux/ioport.h>
61 #include <linux/init.h>
62 #include <linux/console.h>
63 #include <linux/sysrq.h>
64 #include <linux/serial.h>
65 #include <linux/serial_core.h>
66 #include <linux/delay.h>
67 #include <linux/device.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/mv643xx.h>
70 #include <linux/platform_device.h>
71 #include <linux/gfp.h>
76 #define MPSC_NUM_CTLRS 2
79 * Descriptors and buffers must be cache line aligned.
80 * Buffers lengths must be multiple of cache line size.
81 * Number of Tx & Rx descriptors must be powers of 2.
83 #define MPSC_RXR_ENTRIES 32
84 #define MPSC_RXRE_SIZE dma_get_cache_alignment()
85 #define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
86 #define MPSC_RXBE_SIZE dma_get_cache_alignment()
87 #define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
89 #define MPSC_TXR_ENTRIES 32
90 #define MPSC_TXRE_SIZE dma_get_cache_alignment()
91 #define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
92 #define MPSC_TXBE_SIZE dma_get_cache_alignment()
93 #define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
95 #define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
96 + MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)
98 /* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
105 } __attribute((packed
));
107 struct mpsc_tx_desc
{
113 } __attribute((packed
));
116 * Some regs that have the erratum that you can't read them are shared
117 * between the two MPSC controllers. This struct contains those shared regs.
119 struct mpsc_shared_regs
{
120 phys_addr_t mpsc_routing_base_p
;
121 phys_addr_t sdma_intr_base_p
;
123 void __iomem
*mpsc_routing_base
;
124 void __iomem
*sdma_intr_base
;
129 u32 SDMA_INTR_CAUSE_m
;
130 u32 SDMA_INTR_MASK_m
;
133 /* The main driver data structure */
134 struct mpsc_port_info
{
135 struct uart_port port
; /* Overlay uart_port structure */
137 /* Internal driver state for this ctlr */
141 /* Info passed in from platform */
142 u8 mirror_regs
; /* Need to mirror regs? */
143 u8 cache_mgmt
; /* Need manual cache mgmt? */
144 u8 brg_can_tune
; /* BRG has baud tuning? */
152 /* Physical addresses of various blocks of registers (from platform) */
153 phys_addr_t mpsc_base_p
;
154 phys_addr_t sdma_base_p
;
155 phys_addr_t brg_base_p
;
157 /* Virtual addresses of various blocks of registers (from platform) */
158 void __iomem
*mpsc_base
;
159 void __iomem
*sdma_base
;
160 void __iomem
*brg_base
;
162 /* Descriptor ring and buffer allocations */
164 dma_addr_t dma_region_p
;
166 dma_addr_t rxr
; /* Rx descriptor ring */
167 dma_addr_t rxr_p
; /* Phys addr of rxr */
168 u8
*rxb
; /* Rx Ring I/O buf */
169 u8
*rxb_p
; /* Phys addr of rxb */
170 u32 rxr_posn
; /* First desc w/ Rx data */
172 dma_addr_t txr
; /* Tx descriptor ring */
173 dma_addr_t txr_p
; /* Phys addr of txr */
174 u8
*txb
; /* Tx Ring I/O buf */
175 u8
*txb_p
; /* Phys addr of txb */
176 int txr_head
; /* Where new data goes */
177 int txr_tail
; /* Where sent data comes off */
178 spinlock_t tx_lock
; /* transmit lock */
180 /* Mirrored values of regs we can't read (if 'mirror_regs' set) */
186 struct mpsc_shared_regs
*shared_regs
;
189 /* Hooks to platform-specific code */
190 int mpsc_platform_register_driver(void);
191 void mpsc_platform_unregister_driver(void);
193 /* Hooks back in to mpsc common to be called by platform-specific code */
194 struct mpsc_port_info
*mpsc_device_probe(int index
);
195 struct mpsc_port_info
*mpsc_device_remove(int index
);
197 /* Main MPSC Configuration Register Offsets */
198 #define MPSC_MMCRL 0x0000
199 #define MPSC_MMCRH 0x0004
200 #define MPSC_MPCR 0x0008
201 #define MPSC_CHR_1 0x000c
202 #define MPSC_CHR_2 0x0010
203 #define MPSC_CHR_3 0x0014
204 #define MPSC_CHR_4 0x0018
205 #define MPSC_CHR_5 0x001c
206 #define MPSC_CHR_6 0x0020
207 #define MPSC_CHR_7 0x0024
208 #define MPSC_CHR_8 0x0028
209 #define MPSC_CHR_9 0x002c
210 #define MPSC_CHR_10 0x0030
211 #define MPSC_CHR_11 0x0034
213 #define MPSC_MPCR_FRZ (1 << 9)
214 #define MPSC_MPCR_CL_5 0
215 #define MPSC_MPCR_CL_6 1
216 #define MPSC_MPCR_CL_7 2
217 #define MPSC_MPCR_CL_8 3
218 #define MPSC_MPCR_SBL_1 0
219 #define MPSC_MPCR_SBL_2 1
221 #define MPSC_CHR_2_TEV (1<<1)
222 #define MPSC_CHR_2_TA (1<<7)
223 #define MPSC_CHR_2_TTCS (1<<9)
224 #define MPSC_CHR_2_REV (1<<17)
225 #define MPSC_CHR_2_RA (1<<23)
226 #define MPSC_CHR_2_CRD (1<<25)
227 #define MPSC_CHR_2_EH (1<<31)
228 #define MPSC_CHR_2_PAR_ODD 0
229 #define MPSC_CHR_2_PAR_SPACE 1
230 #define MPSC_CHR_2_PAR_EVEN 2
231 #define MPSC_CHR_2_PAR_MARK 3
233 /* MPSC Signal Routing */
234 #define MPSC_MRR 0x0000
235 #define MPSC_RCRR 0x0004
236 #define MPSC_TCRR 0x0008
238 /* Serial DMA Controller Interface Registers */
239 #define SDMA_SDC 0x0000
240 #define SDMA_SDCM 0x0008
241 #define SDMA_RX_DESC 0x0800
242 #define SDMA_RX_BUF_PTR 0x0808
243 #define SDMA_SCRDP 0x0810
244 #define SDMA_TX_DESC 0x0c00
245 #define SDMA_SCTDP 0x0c10
246 #define SDMA_SFTDP 0x0c14
248 #define SDMA_DESC_CMDSTAT_PE (1<<0)
249 #define SDMA_DESC_CMDSTAT_CDL (1<<1)
250 #define SDMA_DESC_CMDSTAT_FR (1<<3)
251 #define SDMA_DESC_CMDSTAT_OR (1<<6)
252 #define SDMA_DESC_CMDSTAT_BR (1<<9)
253 #define SDMA_DESC_CMDSTAT_MI (1<<10)
254 #define SDMA_DESC_CMDSTAT_A (1<<11)
255 #define SDMA_DESC_CMDSTAT_AM (1<<12)
256 #define SDMA_DESC_CMDSTAT_CT (1<<13)
257 #define SDMA_DESC_CMDSTAT_C (1<<14)
258 #define SDMA_DESC_CMDSTAT_ES (1<<15)
259 #define SDMA_DESC_CMDSTAT_L (1<<16)
260 #define SDMA_DESC_CMDSTAT_F (1<<17)
261 #define SDMA_DESC_CMDSTAT_P (1<<18)
262 #define SDMA_DESC_CMDSTAT_EI (1<<23)
263 #define SDMA_DESC_CMDSTAT_O (1<<31)
265 #define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O \
266 | SDMA_DESC_CMDSTAT_EI)
268 #define SDMA_SDC_RFT (1<<0)
269 #define SDMA_SDC_SFM (1<<1)
270 #define SDMA_SDC_BLMR (1<<6)
271 #define SDMA_SDC_BLMT (1<<7)
272 #define SDMA_SDC_POVR (1<<8)
273 #define SDMA_SDC_RIFB (1<<9)
275 #define SDMA_SDCM_ERD (1<<7)
276 #define SDMA_SDCM_AR (1<<15)
277 #define SDMA_SDCM_STD (1<<16)
278 #define SDMA_SDCM_TXD (1<<23)
279 #define SDMA_SDCM_AT (1<<31)
281 #define SDMA_0_CAUSE_RXBUF (1<<0)
282 #define SDMA_0_CAUSE_RXERR (1<<1)
283 #define SDMA_0_CAUSE_TXBUF (1<<2)
284 #define SDMA_0_CAUSE_TXEND (1<<3)
285 #define SDMA_1_CAUSE_RXBUF (1<<8)
286 #define SDMA_1_CAUSE_RXERR (1<<9)
287 #define SDMA_1_CAUSE_TXBUF (1<<10)
288 #define SDMA_1_CAUSE_TXEND (1<<11)
290 #define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
291 | SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
292 #define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
293 | SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
295 /* SDMA Interrupt registers */
296 #define SDMA_INTR_CAUSE 0x0000
297 #define SDMA_INTR_MASK 0x0080
299 /* Baud Rate Generator Interface Registers */
300 #define BRG_BCR 0x0000
301 #define BRG_BTR 0x0004
304 * Define how this driver is known to the outside (we've been assigned a
305 * range on the "Low-density serial ports" major).
307 #define MPSC_MAJOR 204
308 #define MPSC_MINOR_START 44
309 #define MPSC_DRIVER_NAME "MPSC"
310 #define MPSC_DEV_NAME "ttyMM"
311 #define MPSC_VERSION "1.00"
313 static struct mpsc_port_info mpsc_ports
[MPSC_NUM_CTLRS
];
314 static struct mpsc_shared_regs mpsc_shared_regs
;
315 static struct uart_driver mpsc_reg
;
317 static void mpsc_start_rx(struct mpsc_port_info
*pi
);
318 static void mpsc_free_ring_mem(struct mpsc_port_info
*pi
);
319 static void mpsc_release_port(struct uart_port
*port
);
321 ******************************************************************************
323 * Baud Rate Generator Routines (BRG)
325 ******************************************************************************
327 static void mpsc_brg_init(struct mpsc_port_info
*pi
, u32 clk_src
)
331 v
= (pi
->mirror_regs
) ? pi
->BRG_BCR_m
: readl(pi
->brg_base
+ BRG_BCR
);
332 v
= (v
& ~(0xf << 18)) | ((clk_src
& 0xf) << 18);
334 if (pi
->brg_can_tune
)
339 writel(v
, pi
->brg_base
+ BRG_BCR
);
341 writel(readl(pi
->brg_base
+ BRG_BTR
) & 0xffff0000,
342 pi
->brg_base
+ BRG_BTR
);
345 static void mpsc_brg_enable(struct mpsc_port_info
*pi
)
349 v
= (pi
->mirror_regs
) ? pi
->BRG_BCR_m
: readl(pi
->brg_base
+ BRG_BCR
);
354 writel(v
, pi
->brg_base
+ BRG_BCR
);
357 static void mpsc_brg_disable(struct mpsc_port_info
*pi
)
361 v
= (pi
->mirror_regs
) ? pi
->BRG_BCR_m
: readl(pi
->brg_base
+ BRG_BCR
);
366 writel(v
, pi
->brg_base
+ BRG_BCR
);
370 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
371 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
372 * However, the input clock is divided by 16 in the MPSC b/c of how
373 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
374 * calculation by 16 to account for that. So the real calculation
375 * that accounts for the way the mpsc is set up is:
376 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
378 static void mpsc_set_baudrate(struct mpsc_port_info
*pi
, u32 baud
)
380 u32 cdv
= (pi
->port
.uartclk
/ (baud
<< 5)) - 1;
383 mpsc_brg_disable(pi
);
384 v
= (pi
->mirror_regs
) ? pi
->BRG_BCR_m
: readl(pi
->brg_base
+ BRG_BCR
);
385 v
= (v
& 0xffff0000) | (cdv
& 0xffff);
389 writel(v
, pi
->brg_base
+ BRG_BCR
);
394 ******************************************************************************
396 * Serial DMA Routines (SDMA)
398 ******************************************************************************
401 static void mpsc_sdma_burstsize(struct mpsc_port_info
*pi
, u32 burst_size
)
405 pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
406 pi
->port
.line
, burst_size
);
408 burst_size
>>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
411 v
= 0x0; /* 1 64-bit word */
412 else if (burst_size
< 4)
413 v
= 0x1; /* 2 64-bit words */
414 else if (burst_size
< 8)
415 v
= 0x2; /* 4 64-bit words */
417 v
= 0x3; /* 8 64-bit words */
419 writel((readl(pi
->sdma_base
+ SDMA_SDC
) & (0x3 << 12)) | (v
<< 12),
420 pi
->sdma_base
+ SDMA_SDC
);
423 static void mpsc_sdma_init(struct mpsc_port_info
*pi
, u32 burst_size
)
425 pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi
->port
.line
,
428 writel((readl(pi
->sdma_base
+ SDMA_SDC
) & 0x3ff) | 0x03f,
429 pi
->sdma_base
+ SDMA_SDC
);
430 mpsc_sdma_burstsize(pi
, burst_size
);
433 static u32
mpsc_sdma_intr_mask(struct mpsc_port_info
*pi
, u32 mask
)
437 pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi
->port
.line
, mask
);
439 old
= v
= (pi
->mirror_regs
) ? pi
->shared_regs
->SDMA_INTR_MASK_m
:
440 readl(pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_MASK
);
448 pi
->shared_regs
->SDMA_INTR_MASK_m
= v
;
449 writel(v
, pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_MASK
);
456 static void mpsc_sdma_intr_unmask(struct mpsc_port_info
*pi
, u32 mask
)
460 pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi
->port
.line
,mask
);
462 v
= (pi
->mirror_regs
) ? pi
->shared_regs
->SDMA_INTR_MASK_m
463 : readl(pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_MASK
);
471 pi
->shared_regs
->SDMA_INTR_MASK_m
= v
;
472 writel(v
, pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_MASK
);
475 static void mpsc_sdma_intr_ack(struct mpsc_port_info
*pi
)
477 pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi
->port
.line
);
480 pi
->shared_regs
->SDMA_INTR_CAUSE_m
= 0;
481 writeb(0x00, pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_CAUSE
485 static void mpsc_sdma_set_rx_ring(struct mpsc_port_info
*pi
,
486 struct mpsc_rx_desc
*rxre_p
)
488 pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
489 pi
->port
.line
, (u32
)rxre_p
);
491 writel((u32
)rxre_p
, pi
->sdma_base
+ SDMA_SCRDP
);
494 static void mpsc_sdma_set_tx_ring(struct mpsc_port_info
*pi
,
495 struct mpsc_tx_desc
*txre_p
)
497 writel((u32
)txre_p
, pi
->sdma_base
+ SDMA_SFTDP
);
498 writel((u32
)txre_p
, pi
->sdma_base
+ SDMA_SCTDP
);
501 static void mpsc_sdma_cmd(struct mpsc_port_info
*pi
, u32 val
)
505 v
= readl(pi
->sdma_base
+ SDMA_SDCM
);
511 writel(v
, pi
->sdma_base
+ SDMA_SDCM
);
515 static uint
mpsc_sdma_tx_active(struct mpsc_port_info
*pi
)
517 return readl(pi
->sdma_base
+ SDMA_SDCM
) & SDMA_SDCM_TXD
;
520 static void mpsc_sdma_start_tx(struct mpsc_port_info
*pi
)
522 struct mpsc_tx_desc
*txre
, *txre_p
;
524 /* If tx isn't running & there's a desc ready to go, start it */
525 if (!mpsc_sdma_tx_active(pi
)) {
526 txre
= (struct mpsc_tx_desc
*)(pi
->txr
527 + (pi
->txr_tail
* MPSC_TXRE_SIZE
));
528 dma_cache_sync(pi
->port
.dev
, (void *)txre
, MPSC_TXRE_SIZE
,
530 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
531 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
532 invalidate_dcache_range((ulong
)txre
,
533 (ulong
)txre
+ MPSC_TXRE_SIZE
);
536 if (be32_to_cpu(txre
->cmdstat
) & SDMA_DESC_CMDSTAT_O
) {
537 txre_p
= (struct mpsc_tx_desc
*)
538 (pi
->txr_p
+ (pi
->txr_tail
* MPSC_TXRE_SIZE
));
540 mpsc_sdma_set_tx_ring(pi
, txre_p
);
541 mpsc_sdma_cmd(pi
, SDMA_SDCM_STD
| SDMA_SDCM_TXD
);
546 static void mpsc_sdma_stop(struct mpsc_port_info
*pi
)
548 pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi
->port
.line
);
550 /* Abort any SDMA transfers */
551 mpsc_sdma_cmd(pi
, 0);
552 mpsc_sdma_cmd(pi
, SDMA_SDCM_AR
| SDMA_SDCM_AT
);
554 /* Clear the SDMA current and first TX and RX pointers */
555 mpsc_sdma_set_tx_ring(pi
, NULL
);
556 mpsc_sdma_set_rx_ring(pi
, NULL
);
558 /* Disable interrupts */
559 mpsc_sdma_intr_mask(pi
, 0xf);
560 mpsc_sdma_intr_ack(pi
);
564 ******************************************************************************
566 * Multi-Protocol Serial Controller Routines (MPSC)
568 ******************************************************************************
571 static void mpsc_hw_init(struct mpsc_port_info
*pi
)
575 pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi
->port
.line
);
577 /* Set up clock routing */
578 if (pi
->mirror_regs
) {
579 v
= pi
->shared_regs
->MPSC_MRR_m
;
581 pi
->shared_regs
->MPSC_MRR_m
= v
;
582 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_MRR
);
584 v
= pi
->shared_regs
->MPSC_RCRR_m
;
585 v
= (v
& ~0xf0f) | 0x100;
586 pi
->shared_regs
->MPSC_RCRR_m
= v
;
587 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_RCRR
);
589 v
= pi
->shared_regs
->MPSC_TCRR_m
;
590 v
= (v
& ~0xf0f) | 0x100;
591 pi
->shared_regs
->MPSC_TCRR_m
= v
;
592 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_TCRR
);
594 v
= readl(pi
->shared_regs
->mpsc_routing_base
+ MPSC_MRR
);
596 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_MRR
);
598 v
= readl(pi
->shared_regs
->mpsc_routing_base
+ MPSC_RCRR
);
599 v
= (v
& ~0xf0f) | 0x100;
600 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_RCRR
);
602 v
= readl(pi
->shared_regs
->mpsc_routing_base
+ MPSC_TCRR
);
603 v
= (v
& ~0xf0f) | 0x100;
604 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_TCRR
);
607 /* Put MPSC in UART mode & enable Tx/Rx engines */
608 writel(0x000004c4, pi
->mpsc_base
+ MPSC_MMCRL
);
610 /* No preamble, 16x divider, low-latency, */
611 writel(0x04400400, pi
->mpsc_base
+ MPSC_MMCRH
);
612 mpsc_set_baudrate(pi
, pi
->default_baud
);
614 if (pi
->mirror_regs
) {
615 pi
->MPSC_CHR_1_m
= 0;
616 pi
->MPSC_CHR_2_m
= 0;
618 writel(0, pi
->mpsc_base
+ MPSC_CHR_1
);
619 writel(0, pi
->mpsc_base
+ MPSC_CHR_2
);
620 writel(pi
->mpsc_max_idle
, pi
->mpsc_base
+ MPSC_CHR_3
);
621 writel(0, pi
->mpsc_base
+ MPSC_CHR_4
);
622 writel(0, pi
->mpsc_base
+ MPSC_CHR_5
);
623 writel(0, pi
->mpsc_base
+ MPSC_CHR_6
);
624 writel(0, pi
->mpsc_base
+ MPSC_CHR_7
);
625 writel(0, pi
->mpsc_base
+ MPSC_CHR_8
);
626 writel(0, pi
->mpsc_base
+ MPSC_CHR_9
);
627 writel(0, pi
->mpsc_base
+ MPSC_CHR_10
);
630 static void mpsc_enter_hunt(struct mpsc_port_info
*pi
)
632 pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi
->port
.line
);
634 if (pi
->mirror_regs
) {
635 writel(pi
->MPSC_CHR_2_m
| MPSC_CHR_2_EH
,
636 pi
->mpsc_base
+ MPSC_CHR_2
);
637 /* Erratum prevents reading CHR_2 so just delay for a while */
640 writel(readl(pi
->mpsc_base
+ MPSC_CHR_2
) | MPSC_CHR_2_EH
,
641 pi
->mpsc_base
+ MPSC_CHR_2
);
643 while (readl(pi
->mpsc_base
+ MPSC_CHR_2
) & MPSC_CHR_2_EH
)
648 static void mpsc_freeze(struct mpsc_port_info
*pi
)
652 pr_debug("mpsc_freeze[%d]: Freezing\n", pi
->port
.line
);
654 v
= (pi
->mirror_regs
) ? pi
->MPSC_MPCR_m
:
655 readl(pi
->mpsc_base
+ MPSC_MPCR
);
660 writel(v
, pi
->mpsc_base
+ MPSC_MPCR
);
663 static void mpsc_unfreeze(struct mpsc_port_info
*pi
)
667 v
= (pi
->mirror_regs
) ? pi
->MPSC_MPCR_m
:
668 readl(pi
->mpsc_base
+ MPSC_MPCR
);
673 writel(v
, pi
->mpsc_base
+ MPSC_MPCR
);
675 pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi
->port
.line
);
678 static void mpsc_set_char_length(struct mpsc_port_info
*pi
, u32 len
)
682 pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi
->port
.line
,len
);
684 v
= (pi
->mirror_regs
) ? pi
->MPSC_MPCR_m
:
685 readl(pi
->mpsc_base
+ MPSC_MPCR
);
686 v
= (v
& ~(0x3 << 12)) | ((len
& 0x3) << 12);
690 writel(v
, pi
->mpsc_base
+ MPSC_MPCR
);
693 static void mpsc_set_stop_bit_length(struct mpsc_port_info
*pi
, u32 len
)
697 pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
700 v
= (pi
->mirror_regs
) ? pi
->MPSC_MPCR_m
:
701 readl(pi
->mpsc_base
+ MPSC_MPCR
);
703 v
= (v
& ~(1 << 14)) | ((len
& 0x1) << 14);
707 writel(v
, pi
->mpsc_base
+ MPSC_MPCR
);
710 static void mpsc_set_parity(struct mpsc_port_info
*pi
, u32 p
)
714 pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi
->port
.line
, p
);
716 v
= (pi
->mirror_regs
) ? pi
->MPSC_CHR_2_m
:
717 readl(pi
->mpsc_base
+ MPSC_CHR_2
);
720 v
= (v
& ~0xc000c) | (p
<< 18) | (p
<< 2);
723 pi
->MPSC_CHR_2_m
= v
;
724 writel(v
, pi
->mpsc_base
+ MPSC_CHR_2
);
728 ******************************************************************************
730 * Driver Init Routines
732 ******************************************************************************
735 static void mpsc_init_hw(struct mpsc_port_info
*pi
)
737 pr_debug("mpsc_init_hw[%d]: Initializing\n", pi
->port
.line
);
739 mpsc_brg_init(pi
, pi
->brg_clk_src
);
741 mpsc_sdma_init(pi
, dma_get_cache_alignment()); /* burst a cacheline */
746 static int mpsc_alloc_ring_mem(struct mpsc_port_info
*pi
)
750 pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
753 if (!pi
->dma_region
) {
754 if (!dma_set_mask(pi
->port
.dev
, 0xffffffff)) {
755 printk(KERN_ERR
"MPSC: Inadequate DMA support\n");
757 } else if ((pi
->dma_region
= dma_alloc_noncoherent(pi
->port
.dev
,
759 &pi
->dma_region_p
, GFP_KERNEL
))
761 printk(KERN_ERR
"MPSC: Can't alloc Desc region\n");
769 static void mpsc_free_ring_mem(struct mpsc_port_info
*pi
)
771 pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi
->port
.line
);
773 if (pi
->dma_region
) {
774 dma_free_noncoherent(pi
->port
.dev
, MPSC_DMA_ALLOC_SIZE
,
775 pi
->dma_region
, pi
->dma_region_p
);
776 pi
->dma_region
= NULL
;
777 pi
->dma_region_p
= (dma_addr_t
)NULL
;
781 static void mpsc_init_rings(struct mpsc_port_info
*pi
)
783 struct mpsc_rx_desc
*rxre
;
784 struct mpsc_tx_desc
*txre
;
789 pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi
->port
.line
);
791 BUG_ON(pi
->dma_region
== NULL
);
793 memset(pi
->dma_region
, 0, MPSC_DMA_ALLOC_SIZE
);
796 * Descriptors & buffers are multiples of cacheline size and must be
799 dp
= ALIGN((u32
)pi
->dma_region
, dma_get_cache_alignment());
800 dp_p
= ALIGN((u32
)pi
->dma_region_p
, dma_get_cache_alignment());
803 * Partition dma region into rx ring descriptor, rx buffers,
804 * tx ring descriptors, and tx buffers.
809 dp_p
+= MPSC_RXR_SIZE
;
812 pi
->rxb_p
= (u8
*)dp_p
;
814 dp_p
+= MPSC_RXB_SIZE
;
821 dp_p
+= MPSC_TXR_SIZE
;
824 pi
->txb_p
= (u8
*)dp_p
;
829 /* Init rx ring descriptors */
835 for (i
= 0; i
< MPSC_RXR_ENTRIES
; i
++) {
836 rxre
= (struct mpsc_rx_desc
*)dp
;
838 rxre
->bufsize
= cpu_to_be16(MPSC_RXBE_SIZE
);
839 rxre
->bytecnt
= cpu_to_be16(0);
840 rxre
->cmdstat
= cpu_to_be32(SDMA_DESC_CMDSTAT_O
841 | SDMA_DESC_CMDSTAT_EI
| SDMA_DESC_CMDSTAT_F
842 | SDMA_DESC_CMDSTAT_L
);
843 rxre
->link
= cpu_to_be32(dp_p
+ MPSC_RXRE_SIZE
);
844 rxre
->buf_ptr
= cpu_to_be32(bp_p
);
846 dp
+= MPSC_RXRE_SIZE
;
847 dp_p
+= MPSC_RXRE_SIZE
;
848 bp
+= MPSC_RXBE_SIZE
;
849 bp_p
+= MPSC_RXBE_SIZE
;
851 rxre
->link
= cpu_to_be32(pi
->rxr_p
); /* Wrap last back to first */
853 /* Init tx ring descriptors */
859 for (i
= 0; i
< MPSC_TXR_ENTRIES
; i
++) {
860 txre
= (struct mpsc_tx_desc
*)dp
;
862 txre
->link
= cpu_to_be32(dp_p
+ MPSC_TXRE_SIZE
);
863 txre
->buf_ptr
= cpu_to_be32(bp_p
);
865 dp
+= MPSC_TXRE_SIZE
;
866 dp_p
+= MPSC_TXRE_SIZE
;
867 bp
+= MPSC_TXBE_SIZE
;
868 bp_p
+= MPSC_TXBE_SIZE
;
870 txre
->link
= cpu_to_be32(pi
->txr_p
); /* Wrap last back to first */
872 dma_cache_sync(pi
->port
.dev
, (void *)pi
->dma_region
,
873 MPSC_DMA_ALLOC_SIZE
, DMA_BIDIRECTIONAL
);
874 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
875 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
876 flush_dcache_range((ulong
)pi
->dma_region
,
877 (ulong
)pi
->dma_region
878 + MPSC_DMA_ALLOC_SIZE
);
884 static void mpsc_uninit_rings(struct mpsc_port_info
*pi
)
886 pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi
->port
.line
);
888 BUG_ON(pi
->dma_region
== NULL
);
904 static int mpsc_make_ready(struct mpsc_port_info
*pi
)
908 pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi
->port
.line
);
912 rc
= mpsc_alloc_ring_mem(pi
);
922 #ifdef CONFIG_CONSOLE_POLL
923 static int serial_polled
;
927 ******************************************************************************
929 * Interrupt Handling Routines
931 ******************************************************************************
934 static int mpsc_rx_intr(struct mpsc_port_info
*pi
, unsigned long *flags
)
936 struct mpsc_rx_desc
*rxre
;
937 struct tty_port
*port
= &pi
->port
.state
->port
;
938 u32 cmdstat
, bytes_in
, i
;
941 char flag
= TTY_NORMAL
;
943 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi
->port
.line
);
945 rxre
= (struct mpsc_rx_desc
*)(pi
->rxr
+ (pi
->rxr_posn
*MPSC_RXRE_SIZE
));
947 dma_cache_sync(pi
->port
.dev
, (void *)rxre
, MPSC_RXRE_SIZE
,
949 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
950 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
951 invalidate_dcache_range((ulong
)rxre
,
952 (ulong
)rxre
+ MPSC_RXRE_SIZE
);
956 * Loop through Rx descriptors handling ones that have been completed.
958 while (!((cmdstat
= be32_to_cpu(rxre
->cmdstat
))
959 & SDMA_DESC_CMDSTAT_O
)) {
960 bytes_in
= be16_to_cpu(rxre
->bytecnt
);
961 #ifdef CONFIG_CONSOLE_POLL
962 if (unlikely(serial_polled
)) {
967 /* Following use of tty struct directly is deprecated */
968 if (tty_buffer_request_room(port
, bytes_in
) < bytes_in
) {
969 if (port
->low_latency
) {
970 spin_unlock_irqrestore(&pi
->port
.lock
, *flags
);
971 tty_flip_buffer_push(port
);
972 spin_lock_irqsave(&pi
->port
.lock
, *flags
);
975 * If this failed then we will throw away the bytes
976 * but must do so to clear interrupts.
980 bp
= pi
->rxb
+ (pi
->rxr_posn
* MPSC_RXBE_SIZE
);
981 dma_cache_sync(pi
->port
.dev
, (void *)bp
, MPSC_RXBE_SIZE
,
983 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
984 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
985 invalidate_dcache_range((ulong
)bp
,
986 (ulong
)bp
+ MPSC_RXBE_SIZE
);
990 * Other than for parity error, the manual provides little
991 * info on what data will be in a frame flagged by any of
992 * these errors. For parity error, it is the last byte in
993 * the buffer that had the error. As for the rest, I guess
994 * we'll assume there is no data in the buffer.
995 * If there is...it gets lost.
997 if (unlikely(cmdstat
& (SDMA_DESC_CMDSTAT_BR
998 | SDMA_DESC_CMDSTAT_FR
999 | SDMA_DESC_CMDSTAT_OR
))) {
1001 pi
->port
.icount
.rx
++;
1003 if (cmdstat
& SDMA_DESC_CMDSTAT_BR
) { /* Break */
1004 pi
->port
.icount
.brk
++;
1006 if (uart_handle_break(&pi
->port
))
1008 } else if (cmdstat
& SDMA_DESC_CMDSTAT_FR
) {
1009 pi
->port
.icount
.frame
++;
1010 } else if (cmdstat
& SDMA_DESC_CMDSTAT_OR
) {
1011 pi
->port
.icount
.overrun
++;
1014 cmdstat
&= pi
->port
.read_status_mask
;
1016 if (cmdstat
& SDMA_DESC_CMDSTAT_BR
)
1018 else if (cmdstat
& SDMA_DESC_CMDSTAT_FR
)
1020 else if (cmdstat
& SDMA_DESC_CMDSTAT_OR
)
1022 else if (cmdstat
& SDMA_DESC_CMDSTAT_PE
)
1026 if (uart_handle_sysrq_char(&pi
->port
, *bp
)) {
1029 #ifdef CONFIG_CONSOLE_POLL
1030 if (unlikely(serial_polled
)) {
1038 if ((unlikely(cmdstat
& (SDMA_DESC_CMDSTAT_BR
1039 | SDMA_DESC_CMDSTAT_FR
1040 | SDMA_DESC_CMDSTAT_OR
)))
1041 && !(cmdstat
& pi
->port
.ignore_status_mask
)) {
1042 tty_insert_flip_char(port
, *bp
, flag
);
1044 for (i
=0; i
<bytes_in
; i
++)
1045 tty_insert_flip_char(port
, *bp
++, TTY_NORMAL
);
1047 pi
->port
.icount
.rx
+= bytes_in
;
1051 rxre
->bytecnt
= cpu_to_be16(0);
1053 rxre
->cmdstat
= cpu_to_be32(SDMA_DESC_CMDSTAT_O
1054 | SDMA_DESC_CMDSTAT_EI
| SDMA_DESC_CMDSTAT_F
1055 | SDMA_DESC_CMDSTAT_L
);
1057 dma_cache_sync(pi
->port
.dev
, (void *)rxre
, MPSC_RXRE_SIZE
,
1059 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1060 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1061 flush_dcache_range((ulong
)rxre
,
1062 (ulong
)rxre
+ MPSC_RXRE_SIZE
);
1065 /* Advance to next descriptor */
1066 pi
->rxr_posn
= (pi
->rxr_posn
+ 1) & (MPSC_RXR_ENTRIES
- 1);
1067 rxre
= (struct mpsc_rx_desc
*)
1068 (pi
->rxr
+ (pi
->rxr_posn
* MPSC_RXRE_SIZE
));
1069 dma_cache_sync(pi
->port
.dev
, (void *)rxre
, MPSC_RXRE_SIZE
,
1071 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1072 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1073 invalidate_dcache_range((ulong
)rxre
,
1074 (ulong
)rxre
+ MPSC_RXRE_SIZE
);
1079 /* Restart rx engine, if its stopped */
1080 if ((readl(pi
->sdma_base
+ SDMA_SDCM
) & SDMA_SDCM_ERD
) == 0)
1083 spin_unlock_irqrestore(&pi
->port
.lock
, *flags
);
1084 tty_flip_buffer_push(port
);
1085 spin_lock_irqsave(&pi
->port
.lock
, *flags
);
1089 static void mpsc_setup_tx_desc(struct mpsc_port_info
*pi
, u32 count
, u32 intr
)
1091 struct mpsc_tx_desc
*txre
;
1093 txre
= (struct mpsc_tx_desc
*)(pi
->txr
1094 + (pi
->txr_head
* MPSC_TXRE_SIZE
));
1096 txre
->bytecnt
= cpu_to_be16(count
);
1097 txre
->shadow
= txre
->bytecnt
;
1098 wmb(); /* ensure cmdstat is last field updated */
1099 txre
->cmdstat
= cpu_to_be32(SDMA_DESC_CMDSTAT_O
| SDMA_DESC_CMDSTAT_F
1100 | SDMA_DESC_CMDSTAT_L
1101 | ((intr
) ? SDMA_DESC_CMDSTAT_EI
: 0));
1103 dma_cache_sync(pi
->port
.dev
, (void *)txre
, MPSC_TXRE_SIZE
,
1105 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1106 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1107 flush_dcache_range((ulong
)txre
,
1108 (ulong
)txre
+ MPSC_TXRE_SIZE
);
1112 static void mpsc_copy_tx_data(struct mpsc_port_info
*pi
)
1114 struct circ_buf
*xmit
= &pi
->port
.state
->xmit
;
1118 /* Make sure the desc ring isn't full */
1119 while (CIRC_CNT(pi
->txr_head
, pi
->txr_tail
, MPSC_TXR_ENTRIES
)
1120 < (MPSC_TXR_ENTRIES
- 1)) {
1121 if (pi
->port
.x_char
) {
1123 * Ideally, we should use the TCS field in
1124 * CHR_1 to put the x_char out immediately but
1125 * errata prevents us from being able to read
1126 * CHR_2 to know that its safe to write to
1127 * CHR_1. Instead, just put it in-band with
1128 * all the other Tx data.
1130 bp
= pi
->txb
+ (pi
->txr_head
* MPSC_TXBE_SIZE
);
1131 *bp
= pi
->port
.x_char
;
1132 pi
->port
.x_char
= 0;
1134 } else if (!uart_circ_empty(xmit
)
1135 && !uart_tx_stopped(&pi
->port
)) {
1136 i
= min((u32
)MPSC_TXBE_SIZE
,
1137 (u32
)uart_circ_chars_pending(xmit
));
1138 i
= min(i
, (u32
)CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
,
1140 bp
= pi
->txb
+ (pi
->txr_head
* MPSC_TXBE_SIZE
);
1141 memcpy(bp
, &xmit
->buf
[xmit
->tail
], i
);
1142 xmit
->tail
= (xmit
->tail
+ i
) & (UART_XMIT_SIZE
- 1);
1144 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
1145 uart_write_wakeup(&pi
->port
);
1146 } else { /* All tx data copied into ring bufs */
1150 dma_cache_sync(pi
->port
.dev
, (void *)bp
, MPSC_TXBE_SIZE
,
1152 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1153 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1154 flush_dcache_range((ulong
)bp
,
1155 (ulong
)bp
+ MPSC_TXBE_SIZE
);
1157 mpsc_setup_tx_desc(pi
, i
, 1);
1159 /* Advance to next descriptor */
1160 pi
->txr_head
= (pi
->txr_head
+ 1) & (MPSC_TXR_ENTRIES
- 1);
1164 static int mpsc_tx_intr(struct mpsc_port_info
*pi
)
1166 struct mpsc_tx_desc
*txre
;
1168 unsigned long iflags
;
1170 spin_lock_irqsave(&pi
->tx_lock
, iflags
);
1172 if (!mpsc_sdma_tx_active(pi
)) {
1173 txre
= (struct mpsc_tx_desc
*)(pi
->txr
1174 + (pi
->txr_tail
* MPSC_TXRE_SIZE
));
1176 dma_cache_sync(pi
->port
.dev
, (void *)txre
, MPSC_TXRE_SIZE
,
1178 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1179 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1180 invalidate_dcache_range((ulong
)txre
,
1181 (ulong
)txre
+ MPSC_TXRE_SIZE
);
1184 while (!(be32_to_cpu(txre
->cmdstat
) & SDMA_DESC_CMDSTAT_O
)) {
1186 pi
->port
.icount
.tx
+= be16_to_cpu(txre
->bytecnt
);
1187 pi
->txr_tail
= (pi
->txr_tail
+1) & (MPSC_TXR_ENTRIES
-1);
1189 /* If no more data to tx, fall out of loop */
1190 if (pi
->txr_head
== pi
->txr_tail
)
1193 txre
= (struct mpsc_tx_desc
*)(pi
->txr
1194 + (pi
->txr_tail
* MPSC_TXRE_SIZE
));
1195 dma_cache_sync(pi
->port
.dev
, (void *)txre
,
1196 MPSC_TXRE_SIZE
, DMA_FROM_DEVICE
);
1197 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1198 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1199 invalidate_dcache_range((ulong
)txre
,
1200 (ulong
)txre
+ MPSC_TXRE_SIZE
);
1204 mpsc_copy_tx_data(pi
);
1205 mpsc_sdma_start_tx(pi
); /* start next desc if ready */
1208 spin_unlock_irqrestore(&pi
->tx_lock
, iflags
);
1213 * This is the driver's interrupt handler. To avoid a race, we first clear
1214 * the interrupt, then handle any completed Rx/Tx descriptors. When done
1215 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
1217 static irqreturn_t
mpsc_sdma_intr(int irq
, void *dev_id
)
1219 struct mpsc_port_info
*pi
= dev_id
;
1223 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi
->port
.line
);
1225 spin_lock_irqsave(&pi
->port
.lock
, iflags
);
1226 mpsc_sdma_intr_ack(pi
);
1227 if (mpsc_rx_intr(pi
, &iflags
))
1229 if (mpsc_tx_intr(pi
))
1231 spin_unlock_irqrestore(&pi
->port
.lock
, iflags
);
1233 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi
->port
.line
);
1238 ******************************************************************************
1240 * serial_core.c Interface routines
1242 ******************************************************************************
1244 static uint
mpsc_tx_empty(struct uart_port
*port
)
1246 struct mpsc_port_info
*pi
=
1247 container_of(port
, struct mpsc_port_info
, port
);
1251 spin_lock_irqsave(&pi
->port
.lock
, iflags
);
1252 rc
= mpsc_sdma_tx_active(pi
) ? 0 : TIOCSER_TEMT
;
1253 spin_unlock_irqrestore(&pi
->port
.lock
, iflags
);
1258 static void mpsc_set_mctrl(struct uart_port
*port
, uint mctrl
)
1260 /* Have no way to set modem control lines AFAICT */
1263 static uint
mpsc_get_mctrl(struct uart_port
*port
)
1265 struct mpsc_port_info
*pi
=
1266 container_of(port
, struct mpsc_port_info
, port
);
1269 status
= (pi
->mirror_regs
) ? pi
->MPSC_CHR_10_m
1270 : readl(pi
->mpsc_base
+ MPSC_CHR_10
);
1274 mflags
|= TIOCM_CTS
;
1276 mflags
|= TIOCM_CAR
;
1278 return mflags
| TIOCM_DSR
; /* No way to tell if DSR asserted */
1281 static void mpsc_stop_tx(struct uart_port
*port
)
1283 struct mpsc_port_info
*pi
=
1284 container_of(port
, struct mpsc_port_info
, port
);
1286 pr_debug("mpsc_stop_tx[%d]\n", port
->line
);
1291 static void mpsc_start_tx(struct uart_port
*port
)
1293 struct mpsc_port_info
*pi
=
1294 container_of(port
, struct mpsc_port_info
, port
);
1295 unsigned long iflags
;
1297 spin_lock_irqsave(&pi
->tx_lock
, iflags
);
1300 mpsc_copy_tx_data(pi
);
1301 mpsc_sdma_start_tx(pi
);
1303 spin_unlock_irqrestore(&pi
->tx_lock
, iflags
);
1305 pr_debug("mpsc_start_tx[%d]\n", port
->line
);
1308 static void mpsc_start_rx(struct mpsc_port_info
*pi
)
1310 pr_debug("mpsc_start_rx[%d]: Starting...\n", pi
->port
.line
);
1313 mpsc_enter_hunt(pi
);
1314 mpsc_sdma_cmd(pi
, SDMA_SDCM_ERD
);
1318 static void mpsc_stop_rx(struct uart_port
*port
)
1320 struct mpsc_port_info
*pi
=
1321 container_of(port
, struct mpsc_port_info
, port
);
1323 pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port
->line
);
1325 if (pi
->mirror_regs
) {
1326 writel(pi
->MPSC_CHR_2_m
| MPSC_CHR_2_RA
,
1327 pi
->mpsc_base
+ MPSC_CHR_2
);
1328 /* Erratum prevents reading CHR_2 so just delay for a while */
1331 writel(readl(pi
->mpsc_base
+ MPSC_CHR_2
) | MPSC_CHR_2_RA
,
1332 pi
->mpsc_base
+ MPSC_CHR_2
);
1334 while (readl(pi
->mpsc_base
+ MPSC_CHR_2
) & MPSC_CHR_2_RA
)
1338 mpsc_sdma_cmd(pi
, SDMA_SDCM_AR
);
1341 static void mpsc_break_ctl(struct uart_port
*port
, int ctl
)
1343 struct mpsc_port_info
*pi
=
1344 container_of(port
, struct mpsc_port_info
, port
);
1348 v
= ctl
? 0x00ff0000 : 0;
1350 spin_lock_irqsave(&pi
->port
.lock
, flags
);
1351 if (pi
->mirror_regs
)
1352 pi
->MPSC_CHR_1_m
= v
;
1353 writel(v
, pi
->mpsc_base
+ MPSC_CHR_1
);
1354 spin_unlock_irqrestore(&pi
->port
.lock
, flags
);
1357 static int mpsc_startup(struct uart_port
*port
)
1359 struct mpsc_port_info
*pi
=
1360 container_of(port
, struct mpsc_port_info
, port
);
1364 pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1365 port
->line
, pi
->port
.irq
);
1367 if ((rc
= mpsc_make_ready(pi
)) == 0) {
1368 /* Setup IRQ handler */
1369 mpsc_sdma_intr_ack(pi
);
1371 /* If irq's are shared, need to set flag */
1372 if (mpsc_ports
[0].port
.irq
== mpsc_ports
[1].port
.irq
)
1375 if (request_irq(pi
->port
.irq
, mpsc_sdma_intr
, flag
,
1377 printk(KERN_ERR
"MPSC: Can't get SDMA IRQ %d\n",
1380 mpsc_sdma_intr_unmask(pi
, 0xf);
1381 mpsc_sdma_set_rx_ring(pi
, (struct mpsc_rx_desc
*)(pi
->rxr_p
1382 + (pi
->rxr_posn
* MPSC_RXRE_SIZE
)));
1388 static void mpsc_shutdown(struct uart_port
*port
)
1390 struct mpsc_port_info
*pi
=
1391 container_of(port
, struct mpsc_port_info
, port
);
1393 pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port
->line
);
1396 free_irq(pi
->port
.irq
, pi
);
1399 static void mpsc_set_termios(struct uart_port
*port
, struct ktermios
*termios
,
1400 struct ktermios
*old
)
1402 struct mpsc_port_info
*pi
=
1403 container_of(port
, struct mpsc_port_info
, port
);
1406 u32 chr_bits
, stop_bits
, par
;
1408 switch (termios
->c_cflag
& CSIZE
) {
1410 chr_bits
= MPSC_MPCR_CL_5
;
1413 chr_bits
= MPSC_MPCR_CL_6
;
1416 chr_bits
= MPSC_MPCR_CL_7
;
1420 chr_bits
= MPSC_MPCR_CL_8
;
1424 if (termios
->c_cflag
& CSTOPB
)
1425 stop_bits
= MPSC_MPCR_SBL_2
;
1427 stop_bits
= MPSC_MPCR_SBL_1
;
1429 par
= MPSC_CHR_2_PAR_EVEN
;
1430 if (termios
->c_cflag
& PARENB
)
1431 if (termios
->c_cflag
& PARODD
)
1432 par
= MPSC_CHR_2_PAR_ODD
;
1434 if (termios
->c_cflag
& CMSPAR
) {
1435 if (termios
->c_cflag
& PARODD
)
1436 par
= MPSC_CHR_2_PAR_MARK
;
1438 par
= MPSC_CHR_2_PAR_SPACE
;
1442 baud
= uart_get_baud_rate(port
, termios
, old
, 0, port
->uartclk
);
1444 spin_lock_irqsave(&pi
->port
.lock
, flags
);
1446 uart_update_timeout(port
, termios
->c_cflag
, baud
);
1448 mpsc_set_char_length(pi
, chr_bits
);
1449 mpsc_set_stop_bit_length(pi
, stop_bits
);
1450 mpsc_set_parity(pi
, par
);
1451 mpsc_set_baudrate(pi
, baud
);
1453 /* Characters/events to read */
1454 pi
->port
.read_status_mask
= SDMA_DESC_CMDSTAT_OR
;
1456 if (termios
->c_iflag
& INPCK
)
1457 pi
->port
.read_status_mask
|= SDMA_DESC_CMDSTAT_PE
1458 | SDMA_DESC_CMDSTAT_FR
;
1460 if (termios
->c_iflag
& (IGNBRK
| BRKINT
| PARMRK
))
1461 pi
->port
.read_status_mask
|= SDMA_DESC_CMDSTAT_BR
;
1463 /* Characters/events to ignore */
1464 pi
->port
.ignore_status_mask
= 0;
1466 if (termios
->c_iflag
& IGNPAR
)
1467 pi
->port
.ignore_status_mask
|= SDMA_DESC_CMDSTAT_PE
1468 | SDMA_DESC_CMDSTAT_FR
;
1470 if (termios
->c_iflag
& IGNBRK
) {
1471 pi
->port
.ignore_status_mask
|= SDMA_DESC_CMDSTAT_BR
;
1473 if (termios
->c_iflag
& IGNPAR
)
1474 pi
->port
.ignore_status_mask
|= SDMA_DESC_CMDSTAT_OR
;
1477 if ((termios
->c_cflag
& CREAD
)) {
1478 if (!pi
->rcv_data
) {
1482 } else if (pi
->rcv_data
) {
1487 spin_unlock_irqrestore(&pi
->port
.lock
, flags
);
1490 static const char *mpsc_type(struct uart_port
*port
)
1492 pr_debug("mpsc_type[%d]: port type: %s\n", port
->line
,MPSC_DRIVER_NAME
);
1493 return MPSC_DRIVER_NAME
;
static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}
1502 static void mpsc_release_port(struct uart_port
*port
)
1504 struct mpsc_port_info
*pi
=
1505 container_of(port
, struct mpsc_port_info
, port
);
1508 mpsc_uninit_rings(pi
);
1509 mpsc_free_ring_mem(pi
);
static void mpsc_config_port(struct uart_port *port, int flags)
{
}
1518 static int mpsc_verify_port(struct uart_port
*port
, struct serial_struct
*ser
)
1520 struct mpsc_port_info
*pi
=
1521 container_of(port
, struct mpsc_port_info
, port
);
1524 pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi
->port
.line
);
1526 if (ser
->type
!= PORT_UNKNOWN
&& ser
->type
!= PORT_MPSC
)
1528 else if (pi
->port
.irq
!= ser
->irq
)
1530 else if (ser
->io_type
!= SERIAL_IO_MEM
)
1532 else if (pi
->port
.uartclk
/ 16 != ser
->baud_base
) /* Not sure */
1534 else if ((void *)pi
->port
.mapbase
!= ser
->iomem_base
)
1536 else if (pi
->port
.iobase
!= ser
->port
)
1538 else if (ser
->hub6
!= 0)
1543 #ifdef CONFIG_CONSOLE_POLL
1544 /* Serial polling routines for writing and reading from the uart while
1545 * in an interrupt or debug context.
/* Characters buffered by a polled read, carried over between calls */
static char poll_buf[2048];
static int poll_ptr;	/* next char to hand out */
static int poll_cnt;	/* chars remaining in poll_buf */
static void mpsc_put_poll_char(struct uart_port *port,
		unsigned char c);
1554 static int mpsc_get_poll_char(struct uart_port
*port
)
1556 struct mpsc_port_info
*pi
=
1557 container_of(port
, struct mpsc_port_info
, port
);
1558 struct mpsc_rx_desc
*rxre
;
1559 u32 cmdstat
, bytes_in
, i
;
1565 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi
->port
.line
);
1569 return poll_buf
[poll_ptr
++];
1574 while (poll_cnt
== 0) {
1575 rxre
= (struct mpsc_rx_desc
*)(pi
->rxr
+
1576 (pi
->rxr_posn
*MPSC_RXRE_SIZE
));
1577 dma_cache_sync(pi
->port
.dev
, (void *)rxre
,
1578 MPSC_RXRE_SIZE
, DMA_FROM_DEVICE
);
1579 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1580 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1581 invalidate_dcache_range((ulong
)rxre
,
1582 (ulong
)rxre
+ MPSC_RXRE_SIZE
);
1585 * Loop through Rx descriptors handling ones that have
1588 while (poll_cnt
== 0 &&
1589 !((cmdstat
= be32_to_cpu(rxre
->cmdstat
)) &
1590 SDMA_DESC_CMDSTAT_O
)){
1591 bytes_in
= be16_to_cpu(rxre
->bytecnt
);
1592 bp
= pi
->rxb
+ (pi
->rxr_posn
* MPSC_RXBE_SIZE
);
1593 dma_cache_sync(pi
->port
.dev
, (void *) bp
,
1594 MPSC_RXBE_SIZE
, DMA_FROM_DEVICE
);
1595 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1596 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1597 invalidate_dcache_range((ulong
)bp
,
1598 (ulong
)bp
+ MPSC_RXBE_SIZE
);
1600 if ((unlikely(cmdstat
& (SDMA_DESC_CMDSTAT_BR
|
1601 SDMA_DESC_CMDSTAT_FR
| SDMA_DESC_CMDSTAT_OR
))) &&
1602 !(cmdstat
& pi
->port
.ignore_status_mask
)) {
1603 poll_buf
[poll_cnt
] = *bp
;
1606 for (i
= 0; i
< bytes_in
; i
++) {
1607 poll_buf
[poll_cnt
] = *bp
++;
1610 pi
->port
.icount
.rx
+= bytes_in
;
1612 rxre
->bytecnt
= cpu_to_be16(0);
1614 rxre
->cmdstat
= cpu_to_be32(SDMA_DESC_CMDSTAT_O
|
1615 SDMA_DESC_CMDSTAT_EI
|
1616 SDMA_DESC_CMDSTAT_F
|
1617 SDMA_DESC_CMDSTAT_L
);
1619 dma_cache_sync(pi
->port
.dev
, (void *)rxre
,
1620 MPSC_RXRE_SIZE
, DMA_BIDIRECTIONAL
);
1621 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1622 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1623 flush_dcache_range((ulong
)rxre
,
1624 (ulong
)rxre
+ MPSC_RXRE_SIZE
);
1627 /* Advance to next descriptor */
1628 pi
->rxr_posn
= (pi
->rxr_posn
+ 1) &
1629 (MPSC_RXR_ENTRIES
- 1);
1630 rxre
= (struct mpsc_rx_desc
*)(pi
->rxr
+
1631 (pi
->rxr_posn
* MPSC_RXRE_SIZE
));
1632 dma_cache_sync(pi
->port
.dev
, (void *)rxre
,
1633 MPSC_RXRE_SIZE
, DMA_FROM_DEVICE
);
1634 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1635 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1636 invalidate_dcache_range((ulong
)rxre
,
1637 (ulong
)rxre
+ MPSC_RXRE_SIZE
);
1641 /* Restart rx engine, if its stopped */
1642 if ((readl(pi
->sdma_base
+ SDMA_SDCM
) & SDMA_SDCM_ERD
) == 0)
1647 return poll_buf
[poll_ptr
++];
1654 static void mpsc_put_poll_char(struct uart_port
*port
,
1657 struct mpsc_port_info
*pi
=
1658 container_of(port
, struct mpsc_port_info
, port
);
1661 data
= readl(pi
->mpsc_base
+ MPSC_MPCR
);
1662 writeb(c
, pi
->mpsc_base
+ MPSC_CHR_1
);
1664 data
= readl(pi
->mpsc_base
+ MPSC_CHR_2
);
1665 data
|= MPSC_CHR_2_TTCS
;
1666 writel(data
, pi
->mpsc_base
+ MPSC_CHR_2
);
1669 while (readl(pi
->mpsc_base
+ MPSC_CHR_2
) & MPSC_CHR_2_TTCS
);
1673 static struct uart_ops mpsc_pops
= {
1674 .tx_empty
= mpsc_tx_empty
,
1675 .set_mctrl
= mpsc_set_mctrl
,
1676 .get_mctrl
= mpsc_get_mctrl
,
1677 .stop_tx
= mpsc_stop_tx
,
1678 .start_tx
= mpsc_start_tx
,
1679 .stop_rx
= mpsc_stop_rx
,
1680 .break_ctl
= mpsc_break_ctl
,
1681 .startup
= mpsc_startup
,
1682 .shutdown
= mpsc_shutdown
,
1683 .set_termios
= mpsc_set_termios
,
1685 .release_port
= mpsc_release_port
,
1686 .request_port
= mpsc_request_port
,
1687 .config_port
= mpsc_config_port
,
1688 .verify_port
= mpsc_verify_port
,
1689 #ifdef CONFIG_CONSOLE_POLL
1690 .poll_get_char
= mpsc_get_poll_char
,
1691 .poll_put_char
= mpsc_put_poll_char
,
1696 ******************************************************************************
1698 * Console Interface Routines
1700 ******************************************************************************
1703 #ifdef CONFIG_SERIAL_MPSC_CONSOLE
1704 static void mpsc_console_write(struct console
*co
, const char *s
, uint count
)
1706 struct mpsc_port_info
*pi
= &mpsc_ports
[co
->index
];
1707 u8
*bp
, *dp
, add_cr
= 0;
1709 unsigned long iflags
;
1711 spin_lock_irqsave(&pi
->tx_lock
, iflags
);
1713 while (pi
->txr_head
!= pi
->txr_tail
) {
1714 while (mpsc_sdma_tx_active(pi
))
1716 mpsc_sdma_intr_ack(pi
);
1720 while (mpsc_sdma_tx_active(pi
))
1724 bp
= dp
= pi
->txb
+ (pi
->txr_head
* MPSC_TXBE_SIZE
);
1726 for (i
= 0; i
< MPSC_TXBE_SIZE
; i
++) {
1736 if (*(s
++) == '\n') { /* add '\r' after '\n' */
1745 dma_cache_sync(pi
->port
.dev
, (void *)bp
, MPSC_TXBE_SIZE
,
1747 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1748 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1749 flush_dcache_range((ulong
)bp
,
1750 (ulong
)bp
+ MPSC_TXBE_SIZE
);
1752 mpsc_setup_tx_desc(pi
, i
, 0);
1753 pi
->txr_head
= (pi
->txr_head
+ 1) & (MPSC_TXR_ENTRIES
- 1);
1754 mpsc_sdma_start_tx(pi
);
1756 while (mpsc_sdma_tx_active(pi
))
1759 pi
->txr_tail
= (pi
->txr_tail
+ 1) & (MPSC_TXR_ENTRIES
- 1);
1762 spin_unlock_irqrestore(&pi
->tx_lock
, iflags
);
1765 static int __init
mpsc_console_setup(struct console
*co
, char *options
)
1767 struct mpsc_port_info
*pi
;
1768 int baud
, bits
, parity
, flow
;
1770 pr_debug("mpsc_console_setup[%d]: options: %s\n", co
->index
, options
);
1772 if (co
->index
>= MPSC_NUM_CTLRS
)
1775 pi
= &mpsc_ports
[co
->index
];
1777 baud
= pi
->default_baud
;
1778 bits
= pi
->default_bits
;
1779 parity
= pi
->default_parity
;
1780 flow
= pi
->default_flow
;
1785 spin_lock_init(&pi
->port
.lock
); /* Temporary fix--copied from 8250.c */
1788 uart_parse_options(options
, &baud
, &parity
, &bits
, &flow
);
1790 return uart_set_options(&pi
->port
, co
, baud
, parity
, bits
, flow
);
1793 static struct console mpsc_console
= {
1794 .name
= MPSC_DEV_NAME
,
1795 .write
= mpsc_console_write
,
1796 .device
= uart_console_device
,
1797 .setup
= mpsc_console_setup
,
1798 .flags
= CON_PRINTBUFFER
,
1803 static int __init
mpsc_late_console_init(void)
1805 pr_debug("mpsc_late_console_init: Enter\n");
1807 if (!(mpsc_console
.flags
& CON_ENABLED
))
1808 register_console(&mpsc_console
);
1812 late_initcall(mpsc_late_console_init
);
1814 #define MPSC_CONSOLE &mpsc_console
1816 #define MPSC_CONSOLE NULL
1819 ******************************************************************************
1821 * Dummy Platform Driver to extract & map shared register regions
1823 ******************************************************************************
1825 static void mpsc_resource_err(char *s
)
1827 printk(KERN_WARNING
"MPSC: Platform device resource error in %s\n", s
);
1830 static int mpsc_shared_map_regs(struct platform_device
*pd
)
1834 if ((r
= platform_get_resource(pd
, IORESOURCE_MEM
,
1835 MPSC_ROUTING_BASE_ORDER
))
1836 && request_mem_region(r
->start
,
1837 MPSC_ROUTING_REG_BLOCK_SIZE
,
1838 "mpsc_routing_regs")) {
1839 mpsc_shared_regs
.mpsc_routing_base
= ioremap(r
->start
,
1840 MPSC_ROUTING_REG_BLOCK_SIZE
);
1841 mpsc_shared_regs
.mpsc_routing_base_p
= r
->start
;
1843 mpsc_resource_err("MPSC routing base");
1847 if ((r
= platform_get_resource(pd
, IORESOURCE_MEM
,
1848 MPSC_SDMA_INTR_BASE_ORDER
))
1849 && request_mem_region(r
->start
,
1850 MPSC_SDMA_INTR_REG_BLOCK_SIZE
,
1851 "sdma_intr_regs")) {
1852 mpsc_shared_regs
.sdma_intr_base
= ioremap(r
->start
,
1853 MPSC_SDMA_INTR_REG_BLOCK_SIZE
);
1854 mpsc_shared_regs
.sdma_intr_base_p
= r
->start
;
1856 iounmap(mpsc_shared_regs
.mpsc_routing_base
);
1857 release_mem_region(mpsc_shared_regs
.mpsc_routing_base_p
,
1858 MPSC_ROUTING_REG_BLOCK_SIZE
);
1859 mpsc_resource_err("SDMA intr base");
1866 static void mpsc_shared_unmap_regs(void)
1868 if (mpsc_shared_regs
.mpsc_routing_base
) {
1869 iounmap(mpsc_shared_regs
.mpsc_routing_base
);
1870 release_mem_region(mpsc_shared_regs
.mpsc_routing_base_p
,
1871 MPSC_ROUTING_REG_BLOCK_SIZE
);
1873 if (mpsc_shared_regs
.sdma_intr_base
) {
1874 iounmap(mpsc_shared_regs
.sdma_intr_base
);
1875 release_mem_region(mpsc_shared_regs
.sdma_intr_base_p
,
1876 MPSC_SDMA_INTR_REG_BLOCK_SIZE
);
1879 mpsc_shared_regs
.mpsc_routing_base
= NULL
;
1880 mpsc_shared_regs
.sdma_intr_base
= NULL
;
1882 mpsc_shared_regs
.mpsc_routing_base_p
= 0;
1883 mpsc_shared_regs
.sdma_intr_base_p
= 0;
1886 static int mpsc_shared_drv_probe(struct platform_device
*dev
)
1888 struct mpsc_shared_pdata
*pdata
;
1894 rc
= mpsc_shared_map_regs(dev
);
1898 pdata
= dev_get_platdata(&dev
->dev
);
1900 mpsc_shared_regs
.MPSC_MRR_m
= pdata
->mrr_val
;
1901 mpsc_shared_regs
.MPSC_RCRR_m
= pdata
->rcrr_val
;
1902 mpsc_shared_regs
.MPSC_TCRR_m
= pdata
->tcrr_val
;
1903 mpsc_shared_regs
.SDMA_INTR_CAUSE_m
= pdata
->intr_cause_val
;
1904 mpsc_shared_regs
.SDMA_INTR_MASK_m
= pdata
->intr_mask_val
;
1909 static int mpsc_shared_drv_remove(struct platform_device
*dev
)
1914 mpsc_shared_unmap_regs();
1915 mpsc_shared_regs
.MPSC_MRR_m
= 0;
1916 mpsc_shared_regs
.MPSC_RCRR_m
= 0;
1917 mpsc_shared_regs
.MPSC_TCRR_m
= 0;
1918 mpsc_shared_regs
.SDMA_INTR_CAUSE_m
= 0;
1919 mpsc_shared_regs
.SDMA_INTR_MASK_m
= 0;
1924 static struct platform_driver mpsc_shared_driver
= {
1925 .probe
= mpsc_shared_drv_probe
,
1926 .remove
= mpsc_shared_drv_remove
,
1928 .name
= MPSC_SHARED_NAME
,
1933 ******************************************************************************
1935 * Driver Interface Routines
1937 ******************************************************************************
1939 static struct uart_driver mpsc_reg
= {
1940 .owner
= THIS_MODULE
,
1941 .driver_name
= MPSC_DRIVER_NAME
,
1942 .dev_name
= MPSC_DEV_NAME
,
1943 .major
= MPSC_MAJOR
,
1944 .minor
= MPSC_MINOR_START
,
1945 .nr
= MPSC_NUM_CTLRS
,
1946 .cons
= MPSC_CONSOLE
,
1949 static int mpsc_drv_map_regs(struct mpsc_port_info
*pi
,
1950 struct platform_device
*pd
)
1954 if ((r
= platform_get_resource(pd
, IORESOURCE_MEM
, MPSC_BASE_ORDER
))
1955 && request_mem_region(r
->start
, MPSC_REG_BLOCK_SIZE
,
1957 pi
->mpsc_base
= ioremap(r
->start
, MPSC_REG_BLOCK_SIZE
);
1958 pi
->mpsc_base_p
= r
->start
;
1960 mpsc_resource_err("MPSC base");
1964 if ((r
= platform_get_resource(pd
, IORESOURCE_MEM
,
1965 MPSC_SDMA_BASE_ORDER
))
1966 && request_mem_region(r
->start
,
1967 MPSC_SDMA_REG_BLOCK_SIZE
, "sdma_regs")) {
1968 pi
->sdma_base
= ioremap(r
->start
,MPSC_SDMA_REG_BLOCK_SIZE
);
1969 pi
->sdma_base_p
= r
->start
;
1971 mpsc_resource_err("SDMA base");
1975 if ((r
= platform_get_resource(pd
,IORESOURCE_MEM
,MPSC_BRG_BASE_ORDER
))
1976 && request_mem_region(r
->start
,
1977 MPSC_BRG_REG_BLOCK_SIZE
, "brg_regs")) {
1978 pi
->brg_base
= ioremap(r
->start
, MPSC_BRG_REG_BLOCK_SIZE
);
1979 pi
->brg_base_p
= r
->start
;
1981 mpsc_resource_err("BRG base");
1987 if (pi
->sdma_base
) {
1988 iounmap(pi
->sdma_base
);
1989 pi
->sdma_base
= NULL
;
1991 if (pi
->mpsc_base
) {
1992 iounmap(pi
->mpsc_base
);
1993 pi
->mpsc_base
= NULL
;
1998 static void mpsc_drv_unmap_regs(struct mpsc_port_info
*pi
)
2000 if (pi
->mpsc_base
) {
2001 iounmap(pi
->mpsc_base
);
2002 release_mem_region(pi
->mpsc_base_p
, MPSC_REG_BLOCK_SIZE
);
2004 if (pi
->sdma_base
) {
2005 iounmap(pi
->sdma_base
);
2006 release_mem_region(pi
->sdma_base_p
, MPSC_SDMA_REG_BLOCK_SIZE
);
2009 iounmap(pi
->brg_base
);
2010 release_mem_region(pi
->brg_base_p
, MPSC_BRG_REG_BLOCK_SIZE
);
2013 pi
->mpsc_base
= NULL
;
2014 pi
->sdma_base
= NULL
;
2015 pi
->brg_base
= NULL
;
2017 pi
->mpsc_base_p
= 0;
2018 pi
->sdma_base_p
= 0;
2022 static void mpsc_drv_get_platform_data(struct mpsc_port_info
*pi
,
2023 struct platform_device
*pd
, int num
)
2025 struct mpsc_pdata
*pdata
;
2027 pdata
= dev_get_platdata(&pd
->dev
);
2029 pi
->port
.uartclk
= pdata
->brg_clk_freq
;
2030 pi
->port
.iotype
= UPIO_MEM
;
2031 pi
->port
.line
= num
;
2032 pi
->port
.type
= PORT_MPSC
;
2033 pi
->port
.fifosize
= MPSC_TXBE_SIZE
;
2034 pi
->port
.membase
= pi
->mpsc_base
;
2035 pi
->port
.mapbase
= (ulong
)pi
->mpsc_base
;
2036 pi
->port
.ops
= &mpsc_pops
;
2038 pi
->mirror_regs
= pdata
->mirror_regs
;
2039 pi
->cache_mgmt
= pdata
->cache_mgmt
;
2040 pi
->brg_can_tune
= pdata
->brg_can_tune
;
2041 pi
->brg_clk_src
= pdata
->brg_clk_src
;
2042 pi
->mpsc_max_idle
= pdata
->max_idle
;
2043 pi
->default_baud
= pdata
->default_baud
;
2044 pi
->default_bits
= pdata
->default_bits
;
2045 pi
->default_parity
= pdata
->default_parity
;
2046 pi
->default_flow
= pdata
->default_flow
;
2048 /* Initial values of mirrored regs */
2049 pi
->MPSC_CHR_1_m
= pdata
->chr_1_val
;
2050 pi
->MPSC_CHR_2_m
= pdata
->chr_2_val
;
2051 pi
->MPSC_CHR_10_m
= pdata
->chr_10_val
;
2052 pi
->MPSC_MPCR_m
= pdata
->mpcr_val
;
2053 pi
->BRG_BCR_m
= pdata
->bcr_val
;
2055 pi
->shared_regs
= &mpsc_shared_regs
;
2057 pi
->port
.irq
= platform_get_irq(pd
, 0);
2060 static int mpsc_drv_probe(struct platform_device
*dev
)
2062 struct mpsc_port_info
*pi
;
2065 dev_dbg(&dev
->dev
, "mpsc_drv_probe: Adding MPSC %d\n", dev
->id
);
2067 if (dev
->id
>= MPSC_NUM_CTLRS
)
2070 pi
= &mpsc_ports
[dev
->id
];
2072 rc
= mpsc_drv_map_regs(pi
, dev
);
2076 mpsc_drv_get_platform_data(pi
, dev
, dev
->id
);
2077 pi
->port
.dev
= &dev
->dev
;
2079 rc
= mpsc_make_ready(pi
);
2083 spin_lock_init(&pi
->tx_lock
);
2084 rc
= uart_add_one_port(&mpsc_reg
, &pi
->port
);
2090 mpsc_release_port(&pi
->port
);
2092 mpsc_drv_unmap_regs(pi
);
2096 static struct platform_driver mpsc_driver
= {
2097 .probe
= mpsc_drv_probe
,
2099 .name
= MPSC_CTLR_NAME
,
2100 .suppress_bind_attrs
= true,
2104 static int __init
mpsc_drv_init(void)
2108 printk(KERN_INFO
"Serial: MPSC driver\n");
2110 memset(mpsc_ports
, 0, sizeof(mpsc_ports
));
2111 memset(&mpsc_shared_regs
, 0, sizeof(mpsc_shared_regs
));
2113 rc
= uart_register_driver(&mpsc_reg
);
2117 rc
= platform_driver_register(&mpsc_shared_driver
);
2119 goto err_unreg_uart
;
2121 rc
= platform_driver_register(&mpsc_driver
);
2123 goto err_unreg_plat
;
2127 platform_driver_unregister(&mpsc_shared_driver
);
2129 uart_unregister_driver(&mpsc_reg
);
2132 device_initcall(mpsc_drv_init
);
2135 MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
2136 MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
2137 MODULE_LICENSE("GPL");