2 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
3 * GT64260, MV64340, MV64360, GT96100, ... ).
5 * Author: Mark A. Greer <mgreer@mvista.com>
7 * Based on an old MPSC driver that was in the linuxppc tree. It appears to
8 * have been created by Chris Zankel (formerly of MontaVista) but there
9 * is no proper Copyright so I'm not sure. Apparently, parts were also
10 * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
13 * 2004 (c) MontaVista, Software, Inc. This file is licensed under
14 * the terms of the GNU General Public License version 2. This program
15 * is licensed "as is" without any warranty of any kind, whether express
19 * The MPSC interface is much like a typical network controller's interface.
20 * That is, you set up separate rings of descriptors for transmitting and
21 * receiving data. There is also a pool of buffers (one buffer per
22 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
25 * The MPSC requires two other controllers to be able to work. The Baud Rate
26 * Generator (BRG) provides a clock at programmable frequencies which determines
27 * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the
28 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
29 * MPSC. It is actually the SDMA interrupt that the driver uses to keep the
30 * transmit and receive "engines" going (i.e., indicate data has been
31 * transmitted or received).
35 * 1) Some chips have an erratum where several regs cannot be
36 * read. To work around that, we keep a local copy of those regs in
39 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
40 * accesses system mem with coherency enabled. For that reason, the driver
41 * assumes that coherency for that ctlr has been disabled. This means
42 * that when in a cache coherent system, the driver has to manually manage
43 * the data cache on the areas that it touches because the dma_* macros are
46 * 3) There is an erratum (on PPC) where you can't use the instruction to do
47 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
48 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
50 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
54 #if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
58 #include <linux/module.h>
59 #include <linux/moduleparam.h>
60 #include <linux/tty.h>
61 #include <linux/tty_flip.h>
62 #include <linux/ioport.h>
63 #include <linux/init.h>
64 #include <linux/console.h>
65 #include <linux/sysrq.h>
66 #include <linux/serial.h>
67 #include <linux/serial_core.h>
68 #include <linux/delay.h>
69 #include <linux/device.h>
70 #include <linux/dma-mapping.h>
71 #include <linux/mv643xx.h>
72 #include <linux/platform_device.h>
77 #if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
81 #define MPSC_NUM_CTLRS 2
84 * Descriptors and buffers must be cache line aligned.
85 * Buffers lengths must be multiple of cache line size.
86 * Number of Tx & Rx descriptors must be powers of 2.
88 #define MPSC_RXR_ENTRIES 32
89 #define MPSC_RXRE_SIZE dma_get_cache_alignment()
90 #define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
91 #define MPSC_RXBE_SIZE dma_get_cache_alignment()
92 #define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
94 #define MPSC_TXR_ENTRIES 32
95 #define MPSC_TXRE_SIZE dma_get_cache_alignment()
96 #define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
97 #define MPSC_TXBE_SIZE dma_get_cache_alignment()
98 #define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
100 #define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + \
101 MPSC_TXR_SIZE + MPSC_TXB_SIZE + \
102 dma_get_cache_alignment() /* for alignment */)
104 /* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
105 struct mpsc_rx_desc
{
111 } __attribute((packed
));
113 struct mpsc_tx_desc
{
119 } __attribute((packed
));
122 * Some regs that have the erratum that you can't read them are shared
123 * between the two MPSC controllers. This struct contains those shared regs.
125 struct mpsc_shared_regs
{
126 phys_addr_t mpsc_routing_base_p
;
127 phys_addr_t sdma_intr_base_p
;
129 void __iomem
*mpsc_routing_base
;
130 void __iomem
*sdma_intr_base
;
135 u32 SDMA_INTR_CAUSE_m
;
136 u32 SDMA_INTR_MASK_m
;
139 /* The main driver data structure */
140 struct mpsc_port_info
{
141 struct uart_port port
; /* Overlay uart_port structure */
143 /* Internal driver state for this ctlr */
146 tcflag_t c_iflag
; /* save termios->c_iflag */
147 tcflag_t c_cflag
; /* save termios->c_cflag */
149 /* Info passed in from platform */
150 u8 mirror_regs
; /* Need to mirror regs? */
151 u8 cache_mgmt
; /* Need manual cache mgmt? */
152 u8 brg_can_tune
; /* BRG has baud tuning? */
160 /* Physical addresses of various blocks of registers (from platform) */
161 phys_addr_t mpsc_base_p
;
162 phys_addr_t sdma_base_p
;
163 phys_addr_t brg_base_p
;
165 /* Virtual addresses of various blocks of registers (from platform) */
166 void __iomem
*mpsc_base
;
167 void __iomem
*sdma_base
;
168 void __iomem
*brg_base
;
170 /* Descriptor ring and buffer allocations */
172 dma_addr_t dma_region_p
;
174 dma_addr_t rxr
; /* Rx descriptor ring */
175 dma_addr_t rxr_p
; /* Phys addr of rxr */
176 u8
*rxb
; /* Rx Ring I/O buf */
177 u8
*rxb_p
; /* Phys addr of rxb */
178 u32 rxr_posn
; /* First desc w/ Rx data */
180 dma_addr_t txr
; /* Tx descriptor ring */
181 dma_addr_t txr_p
; /* Phys addr of txr */
182 u8
*txb
; /* Tx Ring I/O buf */
183 u8
*txb_p
; /* Phys addr of txb */
184 int txr_head
; /* Where new data goes */
185 int txr_tail
; /* Where sent data comes off */
186 spinlock_t tx_lock
; /* transmit lock */
188 /* Mirrored values of regs we can't read (if 'mirror_regs' set) */
194 struct mpsc_shared_regs
*shared_regs
;
197 /* Hooks to platform-specific code */
198 int mpsc_platform_register_driver(void);
199 void mpsc_platform_unregister_driver(void);
201 /* Hooks back in to mpsc common to be called by platform-specific code */
202 struct mpsc_port_info
*mpsc_device_probe(int index
);
203 struct mpsc_port_info
*mpsc_device_remove(int index
);
205 /* Main MPSC Configuration Register Offsets */
206 #define MPSC_MMCRL 0x0000
207 #define MPSC_MMCRH 0x0004
208 #define MPSC_MPCR 0x0008
209 #define MPSC_CHR_1 0x000c
210 #define MPSC_CHR_2 0x0010
211 #define MPSC_CHR_3 0x0014
212 #define MPSC_CHR_4 0x0018
213 #define MPSC_CHR_5 0x001c
214 #define MPSC_CHR_6 0x0020
215 #define MPSC_CHR_7 0x0024
216 #define MPSC_CHR_8 0x0028
217 #define MPSC_CHR_9 0x002c
218 #define MPSC_CHR_10 0x0030
219 #define MPSC_CHR_11 0x0034
221 #define MPSC_MPCR_FRZ (1 << 9)
222 #define MPSC_MPCR_CL_5 0
223 #define MPSC_MPCR_CL_6 1
224 #define MPSC_MPCR_CL_7 2
225 #define MPSC_MPCR_CL_8 3
226 #define MPSC_MPCR_SBL_1 0
227 #define MPSC_MPCR_SBL_2 1
229 #define MPSC_CHR_2_TEV (1<<1)
230 #define MPSC_CHR_2_TA (1<<7)
231 #define MPSC_CHR_2_TTCS (1<<9)
232 #define MPSC_CHR_2_REV (1<<17)
233 #define MPSC_CHR_2_RA (1<<23)
234 #define MPSC_CHR_2_CRD (1<<25)
235 #define MPSC_CHR_2_EH (1<<31)
236 #define MPSC_CHR_2_PAR_ODD 0
237 #define MPSC_CHR_2_PAR_SPACE 1
238 #define MPSC_CHR_2_PAR_EVEN 2
239 #define MPSC_CHR_2_PAR_MARK 3
241 /* MPSC Signal Routing */
242 #define MPSC_MRR 0x0000
243 #define MPSC_RCRR 0x0004
244 #define MPSC_TCRR 0x0008
246 /* Serial DMA Controller Interface Registers */
247 #define SDMA_SDC 0x0000
248 #define SDMA_SDCM 0x0008
249 #define SDMA_RX_DESC 0x0800
250 #define SDMA_RX_BUF_PTR 0x0808
251 #define SDMA_SCRDP 0x0810
252 #define SDMA_TX_DESC 0x0c00
253 #define SDMA_SCTDP 0x0c10
254 #define SDMA_SFTDP 0x0c14
256 #define SDMA_DESC_CMDSTAT_PE (1<<0)
257 #define SDMA_DESC_CMDSTAT_CDL (1<<1)
258 #define SDMA_DESC_CMDSTAT_FR (1<<3)
259 #define SDMA_DESC_CMDSTAT_OR (1<<6)
260 #define SDMA_DESC_CMDSTAT_BR (1<<9)
261 #define SDMA_DESC_CMDSTAT_MI (1<<10)
262 #define SDMA_DESC_CMDSTAT_A (1<<11)
263 #define SDMA_DESC_CMDSTAT_AM (1<<12)
264 #define SDMA_DESC_CMDSTAT_CT (1<<13)
265 #define SDMA_DESC_CMDSTAT_C (1<<14)
266 #define SDMA_DESC_CMDSTAT_ES (1<<15)
267 #define SDMA_DESC_CMDSTAT_L (1<<16)
268 #define SDMA_DESC_CMDSTAT_F (1<<17)
269 #define SDMA_DESC_CMDSTAT_P (1<<18)
270 #define SDMA_DESC_CMDSTAT_EI (1<<23)
271 #define SDMA_DESC_CMDSTAT_O (1<<31)
273 #define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O | \
274 SDMA_DESC_CMDSTAT_EI)
276 #define SDMA_SDC_RFT (1<<0)
277 #define SDMA_SDC_SFM (1<<1)
278 #define SDMA_SDC_BLMR (1<<6)
279 #define SDMA_SDC_BLMT (1<<7)
280 #define SDMA_SDC_POVR (1<<8)
281 #define SDMA_SDC_RIFB (1<<9)
283 #define SDMA_SDCM_ERD (1<<7)
284 #define SDMA_SDCM_AR (1<<15)
285 #define SDMA_SDCM_STD (1<<16)
286 #define SDMA_SDCM_TXD (1<<23)
287 #define SDMA_SDCM_AT (1<<31)
289 #define SDMA_0_CAUSE_RXBUF (1<<0)
290 #define SDMA_0_CAUSE_RXERR (1<<1)
291 #define SDMA_0_CAUSE_TXBUF (1<<2)
292 #define SDMA_0_CAUSE_TXEND (1<<3)
293 #define SDMA_1_CAUSE_RXBUF (1<<8)
294 #define SDMA_1_CAUSE_RXERR (1<<9)
295 #define SDMA_1_CAUSE_TXBUF (1<<10)
296 #define SDMA_1_CAUSE_TXEND (1<<11)
298 #define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR | \
299 SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
300 #define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND | \
301 SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
303 /* SDMA Interrupt registers */
304 #define SDMA_INTR_CAUSE 0x0000
305 #define SDMA_INTR_MASK 0x0080
307 /* Baud Rate Generator Interface Registers */
308 #define BRG_BCR 0x0000
309 #define BRG_BTR 0x0004
312 * Define how this driver is known to the outside (we've been assigned a
313 * range on the "Low-density serial ports" major).
315 #define MPSC_MAJOR 204
316 #define MPSC_MINOR_START 44
317 #define MPSC_DRIVER_NAME "MPSC"
318 #define MPSC_DEV_NAME "ttyMM"
319 #define MPSC_VERSION "1.00"
321 static struct mpsc_port_info mpsc_ports
[MPSC_NUM_CTLRS
];
322 static struct mpsc_shared_regs mpsc_shared_regs
;
323 static struct uart_driver mpsc_reg
;
325 static void mpsc_start_rx(struct mpsc_port_info
*pi
);
326 static void mpsc_free_ring_mem(struct mpsc_port_info
*pi
);
327 static void mpsc_release_port(struct uart_port
*port
);
329 ******************************************************************************
331 * Baud Rate Generator Routines (BRG)
333 ******************************************************************************
336 mpsc_brg_init(struct mpsc_port_info
*pi
, u32 clk_src
)
340 v
= (pi
->mirror_regs
) ? pi
->BRG_BCR_m
: readl(pi
->brg_base
+ BRG_BCR
);
341 v
= (v
& ~(0xf << 18)) | ((clk_src
& 0xf) << 18);
343 if (pi
->brg_can_tune
)
348 writel(v
, pi
->brg_base
+ BRG_BCR
);
350 writel(readl(pi
->brg_base
+ BRG_BTR
) & 0xffff0000,
351 pi
->brg_base
+ BRG_BTR
);
356 mpsc_brg_enable(struct mpsc_port_info
*pi
)
360 v
= (pi
->mirror_regs
) ? pi
->BRG_BCR_m
: readl(pi
->brg_base
+ BRG_BCR
);
365 writel(v
, pi
->brg_base
+ BRG_BCR
);
370 mpsc_brg_disable(struct mpsc_port_info
*pi
)
374 v
= (pi
->mirror_regs
) ? pi
->BRG_BCR_m
: readl(pi
->brg_base
+ BRG_BCR
);
379 writel(v
, pi
->brg_base
+ BRG_BCR
);
384 mpsc_set_baudrate(struct mpsc_port_info
*pi
, u32 baud
)
387 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
388 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
389 * However, the input clock is divided by 16 in the MPSC b/c of how
390 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
391 * calculation by 16 to account for that. So the real calculation
392 * that accounts for the way the mpsc is set up is:
393 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
395 u32 cdv
= (pi
->port
.uartclk
/ (baud
<< 5)) - 1;
398 mpsc_brg_disable(pi
);
399 v
= (pi
->mirror_regs
) ? pi
->BRG_BCR_m
: readl(pi
->brg_base
+ BRG_BCR
);
400 v
= (v
& 0xffff0000) | (cdv
& 0xffff);
404 writel(v
, pi
->brg_base
+ BRG_BCR
);
411 ******************************************************************************
413 * Serial DMA Routines (SDMA)
415 ******************************************************************************
419 mpsc_sdma_burstsize(struct mpsc_port_info
*pi
, u32 burst_size
)
423 pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
424 pi
->port
.line
, burst_size
);
426 burst_size
>>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
429 v
= 0x0; /* 1 64-bit word */
430 else if (burst_size
< 4)
431 v
= 0x1; /* 2 64-bit words */
432 else if (burst_size
< 8)
433 v
= 0x2; /* 4 64-bit words */
435 v
= 0x3; /* 8 64-bit words */
437 writel((readl(pi
->sdma_base
+ SDMA_SDC
) & (0x3 << 12)) | (v
<< 12),
438 pi
->sdma_base
+ SDMA_SDC
);
443 mpsc_sdma_init(struct mpsc_port_info
*pi
, u32 burst_size
)
445 pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi
->port
.line
,
448 writel((readl(pi
->sdma_base
+ SDMA_SDC
) & 0x3ff) | 0x03f,
449 pi
->sdma_base
+ SDMA_SDC
);
450 mpsc_sdma_burstsize(pi
, burst_size
);
455 mpsc_sdma_intr_mask(struct mpsc_port_info
*pi
, u32 mask
)
459 pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi
->port
.line
, mask
);
461 old
= v
= (pi
->mirror_regs
) ? pi
->shared_regs
->SDMA_INTR_MASK_m
:
462 readl(pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_MASK
);
470 pi
->shared_regs
->SDMA_INTR_MASK_m
= v
;
471 writel(v
, pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_MASK
);
479 mpsc_sdma_intr_unmask(struct mpsc_port_info
*pi
, u32 mask
)
483 pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi
->port
.line
,mask
);
485 v
= (pi
->mirror_regs
) ? pi
->shared_regs
->SDMA_INTR_MASK_m
:
486 readl(pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_MASK
);
494 pi
->shared_regs
->SDMA_INTR_MASK_m
= v
;
495 writel(v
, pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_MASK
);
500 mpsc_sdma_intr_ack(struct mpsc_port_info
*pi
)
502 pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi
->port
.line
);
505 pi
->shared_regs
->SDMA_INTR_CAUSE_m
= 0;
506 writeb(0x00, pi
->shared_regs
->sdma_intr_base
+ SDMA_INTR_CAUSE
+
512 mpsc_sdma_set_rx_ring(struct mpsc_port_info
*pi
, struct mpsc_rx_desc
*rxre_p
)
514 pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
515 pi
->port
.line
, (u32
) rxre_p
);
517 writel((u32
)rxre_p
, pi
->sdma_base
+ SDMA_SCRDP
);
522 mpsc_sdma_set_tx_ring(struct mpsc_port_info
*pi
, struct mpsc_tx_desc
*txre_p
)
524 writel((u32
)txre_p
, pi
->sdma_base
+ SDMA_SFTDP
);
525 writel((u32
)txre_p
, pi
->sdma_base
+ SDMA_SCTDP
);
530 mpsc_sdma_cmd(struct mpsc_port_info
*pi
, u32 val
)
534 v
= readl(pi
->sdma_base
+ SDMA_SDCM
);
540 writel(v
, pi
->sdma_base
+ SDMA_SDCM
);
546 mpsc_sdma_tx_active(struct mpsc_port_info
*pi
)
548 return readl(pi
->sdma_base
+ SDMA_SDCM
) & SDMA_SDCM_TXD
;
552 mpsc_sdma_start_tx(struct mpsc_port_info
*pi
)
554 struct mpsc_tx_desc
*txre
, *txre_p
;
556 /* If tx isn't running & there's a desc ready to go, start it */
557 if (!mpsc_sdma_tx_active(pi
)) {
558 txre
= (struct mpsc_tx_desc
*)(pi
->txr
+
559 (pi
->txr_tail
* MPSC_TXRE_SIZE
));
560 dma_cache_sync(pi
->port
.dev
, (void *) txre
, MPSC_TXRE_SIZE
, DMA_FROM_DEVICE
);
561 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
562 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
563 invalidate_dcache_range((ulong
)txre
,
564 (ulong
)txre
+ MPSC_TXRE_SIZE
);
567 if (be32_to_cpu(txre
->cmdstat
) & SDMA_DESC_CMDSTAT_O
) {
568 txre_p
= (struct mpsc_tx_desc
*)(pi
->txr_p
+
572 mpsc_sdma_set_tx_ring(pi
, txre_p
);
573 mpsc_sdma_cmd(pi
, SDMA_SDCM_STD
| SDMA_SDCM_TXD
);
581 mpsc_sdma_stop(struct mpsc_port_info
*pi
)
583 pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi
->port
.line
);
585 /* Abort any SDMA transfers */
586 mpsc_sdma_cmd(pi
, 0);
587 mpsc_sdma_cmd(pi
, SDMA_SDCM_AR
| SDMA_SDCM_AT
);
589 /* Clear the SDMA current and first TX and RX pointers */
590 mpsc_sdma_set_tx_ring(pi
, NULL
);
591 mpsc_sdma_set_rx_ring(pi
, NULL
);
593 /* Disable interrupts */
594 mpsc_sdma_intr_mask(pi
, 0xf);
595 mpsc_sdma_intr_ack(pi
);
601 ******************************************************************************
603 * Multi-Protocol Serial Controller Routines (MPSC)
605 ******************************************************************************
609 mpsc_hw_init(struct mpsc_port_info
*pi
)
613 pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi
->port
.line
);
615 /* Set up clock routing */
616 if (pi
->mirror_regs
) {
617 v
= pi
->shared_regs
->MPSC_MRR_m
;
619 pi
->shared_regs
->MPSC_MRR_m
= v
;
620 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_MRR
);
622 v
= pi
->shared_regs
->MPSC_RCRR_m
;
623 v
= (v
& ~0xf0f) | 0x100;
624 pi
->shared_regs
->MPSC_RCRR_m
= v
;
625 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_RCRR
);
627 v
= pi
->shared_regs
->MPSC_TCRR_m
;
628 v
= (v
& ~0xf0f) | 0x100;
629 pi
->shared_regs
->MPSC_TCRR_m
= v
;
630 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_TCRR
);
633 v
= readl(pi
->shared_regs
->mpsc_routing_base
+ MPSC_MRR
);
635 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_MRR
);
637 v
= readl(pi
->shared_regs
->mpsc_routing_base
+ MPSC_RCRR
);
638 v
= (v
& ~0xf0f) | 0x100;
639 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_RCRR
);
641 v
= readl(pi
->shared_regs
->mpsc_routing_base
+ MPSC_TCRR
);
642 v
= (v
& ~0xf0f) | 0x100;
643 writel(v
, pi
->shared_regs
->mpsc_routing_base
+ MPSC_TCRR
);
646 /* Put MPSC in UART mode & enabel Tx/Rx egines */
647 writel(0x000004c4, pi
->mpsc_base
+ MPSC_MMCRL
);
649 /* No preamble, 16x divider, low-latency, */
650 writel(0x04400400, pi
->mpsc_base
+ MPSC_MMCRH
);
652 if (pi
->mirror_regs
) {
653 pi
->MPSC_CHR_1_m
= 0;
654 pi
->MPSC_CHR_2_m
= 0;
656 writel(0, pi
->mpsc_base
+ MPSC_CHR_1
);
657 writel(0, pi
->mpsc_base
+ MPSC_CHR_2
);
658 writel(pi
->mpsc_max_idle
, pi
->mpsc_base
+ MPSC_CHR_3
);
659 writel(0, pi
->mpsc_base
+ MPSC_CHR_4
);
660 writel(0, pi
->mpsc_base
+ MPSC_CHR_5
);
661 writel(0, pi
->mpsc_base
+ MPSC_CHR_6
);
662 writel(0, pi
->mpsc_base
+ MPSC_CHR_7
);
663 writel(0, pi
->mpsc_base
+ MPSC_CHR_8
);
664 writel(0, pi
->mpsc_base
+ MPSC_CHR_9
);
665 writel(0, pi
->mpsc_base
+ MPSC_CHR_10
);
671 mpsc_enter_hunt(struct mpsc_port_info
*pi
)
673 pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi
->port
.line
);
675 if (pi
->mirror_regs
) {
676 writel(pi
->MPSC_CHR_2_m
| MPSC_CHR_2_EH
,
677 pi
->mpsc_base
+ MPSC_CHR_2
);
678 /* Erratum prevents reading CHR_2 so just delay for a while */
682 writel(readl(pi
->mpsc_base
+ MPSC_CHR_2
) | MPSC_CHR_2_EH
,
683 pi
->mpsc_base
+ MPSC_CHR_2
);
685 while (readl(pi
->mpsc_base
+ MPSC_CHR_2
) & MPSC_CHR_2_EH
)
693 mpsc_freeze(struct mpsc_port_info
*pi
)
697 pr_debug("mpsc_freeze[%d]: Freezing\n", pi
->port
.line
);
699 v
= (pi
->mirror_regs
) ? pi
->MPSC_MPCR_m
:
700 readl(pi
->mpsc_base
+ MPSC_MPCR
);
705 writel(v
, pi
->mpsc_base
+ MPSC_MPCR
);
710 mpsc_unfreeze(struct mpsc_port_info
*pi
)
714 v
= (pi
->mirror_regs
) ? pi
->MPSC_MPCR_m
:
715 readl(pi
->mpsc_base
+ MPSC_MPCR
);
720 writel(v
, pi
->mpsc_base
+ MPSC_MPCR
);
722 pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi
->port
.line
);
727 mpsc_set_char_length(struct mpsc_port_info
*pi
, u32 len
)
731 pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi
->port
.line
,len
);
733 v
= (pi
->mirror_regs
) ? pi
->MPSC_MPCR_m
:
734 readl(pi
->mpsc_base
+ MPSC_MPCR
);
735 v
= (v
& ~(0x3 << 12)) | ((len
& 0x3) << 12);
739 writel(v
, pi
->mpsc_base
+ MPSC_MPCR
);
744 mpsc_set_stop_bit_length(struct mpsc_port_info
*pi
, u32 len
)
748 pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
751 v
= (pi
->mirror_regs
) ? pi
->MPSC_MPCR_m
:
752 readl(pi
->mpsc_base
+ MPSC_MPCR
);
754 v
= (v
& ~(1 << 14)) | ((len
& 0x1) << 14);
758 writel(v
, pi
->mpsc_base
+ MPSC_MPCR
);
763 mpsc_set_parity(struct mpsc_port_info
*pi
, u32 p
)
767 pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi
->port
.line
, p
);
769 v
= (pi
->mirror_regs
) ? pi
->MPSC_CHR_2_m
:
770 readl(pi
->mpsc_base
+ MPSC_CHR_2
);
773 v
= (v
& ~0xc000c) | (p
<< 18) | (p
<< 2);
776 pi
->MPSC_CHR_2_m
= v
;
777 writel(v
, pi
->mpsc_base
+ MPSC_CHR_2
);
782 ******************************************************************************
784 * Driver Init Routines
786 ******************************************************************************
790 mpsc_init_hw(struct mpsc_port_info
*pi
)
792 pr_debug("mpsc_init_hw[%d]: Initializing\n", pi
->port
.line
);
794 mpsc_brg_init(pi
, pi
->brg_clk_src
);
796 mpsc_sdma_init(pi
, dma_get_cache_alignment()); /* burst a cacheline */
804 mpsc_alloc_ring_mem(struct mpsc_port_info
*pi
)
808 pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
811 if (!pi
->dma_region
) {
812 if (!dma_supported(pi
->port
.dev
, 0xffffffff)) {
813 printk(KERN_ERR
"MPSC: Inadequate DMA support\n");
816 else if ((pi
->dma_region
= dma_alloc_noncoherent(pi
->port
.dev
,
817 MPSC_DMA_ALLOC_SIZE
, &pi
->dma_region_p
, GFP_KERNEL
))
820 printk(KERN_ERR
"MPSC: Can't alloc Desc region\n");
829 mpsc_free_ring_mem(struct mpsc_port_info
*pi
)
831 pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi
->port
.line
);
833 if (pi
->dma_region
) {
834 dma_free_noncoherent(pi
->port
.dev
, MPSC_DMA_ALLOC_SIZE
,
835 pi
->dma_region
, pi
->dma_region_p
);
836 pi
->dma_region
= NULL
;
837 pi
->dma_region_p
= (dma_addr_t
) NULL
;
844 mpsc_init_rings(struct mpsc_port_info
*pi
)
846 struct mpsc_rx_desc
*rxre
;
847 struct mpsc_tx_desc
*txre
;
852 pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi
->port
.line
);
854 BUG_ON(pi
->dma_region
== NULL
);
856 memset(pi
->dma_region
, 0, MPSC_DMA_ALLOC_SIZE
);
859 * Descriptors & buffers are multiples of cacheline size and must be
862 dp
= ALIGN((u32
) pi
->dma_region
, dma_get_cache_alignment());
863 dp_p
= ALIGN((u32
) pi
->dma_region_p
, dma_get_cache_alignment());
866 * Partition dma region into rx ring descriptor, rx buffers,
867 * tx ring descriptors, and tx buffers.
872 dp_p
+= MPSC_RXR_SIZE
;
875 pi
->rxb_p
= (u8
*) dp_p
;
877 dp_p
+= MPSC_RXB_SIZE
;
884 dp_p
+= MPSC_TXR_SIZE
;
887 pi
->txb_p
= (u8
*) dp_p
;
892 /* Init rx ring descriptors */
898 for (i
= 0; i
< MPSC_RXR_ENTRIES
; i
++) {
899 rxre
= (struct mpsc_rx_desc
*)dp
;
901 rxre
->bufsize
= cpu_to_be16(MPSC_RXBE_SIZE
);
902 rxre
->bytecnt
= cpu_to_be16(0);
903 rxre
->cmdstat
= cpu_to_be32(SDMA_DESC_CMDSTAT_O
|
904 SDMA_DESC_CMDSTAT_EI
|
905 SDMA_DESC_CMDSTAT_F
|
906 SDMA_DESC_CMDSTAT_L
);
907 rxre
->link
= cpu_to_be32(dp_p
+ MPSC_RXRE_SIZE
);
908 rxre
->buf_ptr
= cpu_to_be32(bp_p
);
910 dp
+= MPSC_RXRE_SIZE
;
911 dp_p
+= MPSC_RXRE_SIZE
;
912 bp
+= MPSC_RXBE_SIZE
;
913 bp_p
+= MPSC_RXBE_SIZE
;
915 rxre
->link
= cpu_to_be32(pi
->rxr_p
); /* Wrap last back to first */
917 /* Init tx ring descriptors */
923 for (i
= 0; i
< MPSC_TXR_ENTRIES
; i
++) {
924 txre
= (struct mpsc_tx_desc
*)dp
;
926 txre
->link
= cpu_to_be32(dp_p
+ MPSC_TXRE_SIZE
);
927 txre
->buf_ptr
= cpu_to_be32(bp_p
);
929 dp
+= MPSC_TXRE_SIZE
;
930 dp_p
+= MPSC_TXRE_SIZE
;
931 bp
+= MPSC_TXBE_SIZE
;
932 bp_p
+= MPSC_TXBE_SIZE
;
934 txre
->link
= cpu_to_be32(pi
->txr_p
); /* Wrap last back to first */
936 dma_cache_sync(pi
->port
.dev
, (void *) pi
->dma_region
, MPSC_DMA_ALLOC_SIZE
,
938 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
939 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
940 flush_dcache_range((ulong
)pi
->dma_region
,
941 (ulong
)pi
->dma_region
+ MPSC_DMA_ALLOC_SIZE
);
948 mpsc_uninit_rings(struct mpsc_port_info
*pi
)
950 pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi
->port
.line
);
952 BUG_ON(pi
->dma_region
== NULL
);
971 mpsc_make_ready(struct mpsc_port_info
*pi
)
975 pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi
->port
.line
);
979 if ((rc
= mpsc_alloc_ring_mem(pi
)))
989 ******************************************************************************
991 * Interrupt Handling Routines
993 ******************************************************************************
997 mpsc_rx_intr(struct mpsc_port_info
*pi
)
999 struct mpsc_rx_desc
*rxre
;
1000 struct tty_struct
*tty
= pi
->port
.info
->tty
;
1001 u32 cmdstat
, bytes_in
, i
;
1004 char flag
= TTY_NORMAL
;
1006 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi
->port
.line
);
1008 rxre
= (struct mpsc_rx_desc
*)(pi
->rxr
+ (pi
->rxr_posn
*MPSC_RXRE_SIZE
));
1010 dma_cache_sync(pi
->port
.dev
, (void *)rxre
, MPSC_RXRE_SIZE
, DMA_FROM_DEVICE
);
1011 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1012 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1013 invalidate_dcache_range((ulong
)rxre
,
1014 (ulong
)rxre
+ MPSC_RXRE_SIZE
);
1018 * Loop through Rx descriptors handling ones that have been completed.
1020 while (!((cmdstat
= be32_to_cpu(rxre
->cmdstat
)) & SDMA_DESC_CMDSTAT_O
)){
1021 bytes_in
= be16_to_cpu(rxre
->bytecnt
);
1023 /* Following use of tty struct directly is deprecated */
1024 if (unlikely(tty_buffer_request_room(tty
, bytes_in
) < bytes_in
)) {
1025 if (tty
->low_latency
)
1026 tty_flip_buffer_push(tty
);
1028 * If this failed then we will throw away the bytes
1029 * but must do so to clear interrupts.
1033 bp
= pi
->rxb
+ (pi
->rxr_posn
* MPSC_RXBE_SIZE
);
1034 dma_cache_sync(pi
->port
.dev
, (void *) bp
, MPSC_RXBE_SIZE
, DMA_FROM_DEVICE
);
1035 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1036 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1037 invalidate_dcache_range((ulong
)bp
,
1038 (ulong
)bp
+ MPSC_RXBE_SIZE
);
1042 * Other than for parity error, the manual provides little
1043 * info on what data will be in a frame flagged by any of
1044 * these errors. For parity error, it is the last byte in
1045 * the buffer that had the error. As for the rest, I guess
1046 * we'll assume there is no data in the buffer.
1047 * If there is...it gets lost.
1049 if (unlikely(cmdstat
& (SDMA_DESC_CMDSTAT_BR
|
1050 SDMA_DESC_CMDSTAT_FR
| SDMA_DESC_CMDSTAT_OR
))) {
1052 pi
->port
.icount
.rx
++;
1054 if (cmdstat
& SDMA_DESC_CMDSTAT_BR
) { /* Break */
1055 pi
->port
.icount
.brk
++;
1057 if (uart_handle_break(&pi
->port
))
1060 else if (cmdstat
& SDMA_DESC_CMDSTAT_FR
)/* Framing */
1061 pi
->port
.icount
.frame
++;
1062 else if (cmdstat
& SDMA_DESC_CMDSTAT_OR
) /* Overrun */
1063 pi
->port
.icount
.overrun
++;
1065 cmdstat
&= pi
->port
.read_status_mask
;
1067 if (cmdstat
& SDMA_DESC_CMDSTAT_BR
)
1069 else if (cmdstat
& SDMA_DESC_CMDSTAT_FR
)
1071 else if (cmdstat
& SDMA_DESC_CMDSTAT_OR
)
1073 else if (cmdstat
& SDMA_DESC_CMDSTAT_PE
)
1077 if (uart_handle_sysrq_char(&pi
->port
, *bp
)) {
1083 if ((unlikely(cmdstat
& (SDMA_DESC_CMDSTAT_BR
|
1084 SDMA_DESC_CMDSTAT_FR
| SDMA_DESC_CMDSTAT_OR
))) &&
1085 !(cmdstat
& pi
->port
.ignore_status_mask
))
1087 tty_insert_flip_char(tty
, *bp
, flag
);
1089 for (i
=0; i
<bytes_in
; i
++)
1090 tty_insert_flip_char(tty
, *bp
++, TTY_NORMAL
);
1092 pi
->port
.icount
.rx
+= bytes_in
;
1096 rxre
->bytecnt
= cpu_to_be16(0);
1098 rxre
->cmdstat
= cpu_to_be32(SDMA_DESC_CMDSTAT_O
|
1099 SDMA_DESC_CMDSTAT_EI
|
1100 SDMA_DESC_CMDSTAT_F
|
1101 SDMA_DESC_CMDSTAT_L
);
1103 dma_cache_sync(pi
->port
.dev
, (void *)rxre
, MPSC_RXRE_SIZE
, DMA_BIDIRECTIONAL
);
1104 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1105 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1106 flush_dcache_range((ulong
)rxre
,
1107 (ulong
)rxre
+ MPSC_RXRE_SIZE
);
1110 /* Advance to next descriptor */
1111 pi
->rxr_posn
= (pi
->rxr_posn
+ 1) & (MPSC_RXR_ENTRIES
- 1);
1112 rxre
= (struct mpsc_rx_desc
*)(pi
->rxr
+
1113 (pi
->rxr_posn
* MPSC_RXRE_SIZE
));
1114 dma_cache_sync(pi
->port
.dev
, (void *)rxre
, MPSC_RXRE_SIZE
, DMA_FROM_DEVICE
);
1115 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1116 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1117 invalidate_dcache_range((ulong
)rxre
,
1118 (ulong
)rxre
+ MPSC_RXRE_SIZE
);
1124 /* Restart rx engine, if its stopped */
1125 if ((readl(pi
->sdma_base
+ SDMA_SDCM
) & SDMA_SDCM_ERD
) == 0)
1128 tty_flip_buffer_push(tty
);
1133 mpsc_setup_tx_desc(struct mpsc_port_info
*pi
, u32 count
, u32 intr
)
1135 struct mpsc_tx_desc
*txre
;
1137 txre
= (struct mpsc_tx_desc
*)(pi
->txr
+
1138 (pi
->txr_head
* MPSC_TXRE_SIZE
));
1140 txre
->bytecnt
= cpu_to_be16(count
);
1141 txre
->shadow
= txre
->bytecnt
;
1142 wmb(); /* ensure cmdstat is last field updated */
1143 txre
->cmdstat
= cpu_to_be32(SDMA_DESC_CMDSTAT_O
| SDMA_DESC_CMDSTAT_F
|
1144 SDMA_DESC_CMDSTAT_L
| ((intr
) ?
1145 SDMA_DESC_CMDSTAT_EI
1148 dma_cache_sync(pi
->port
.dev
, (void *) txre
, MPSC_TXRE_SIZE
, DMA_BIDIRECTIONAL
);
1149 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1150 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1151 flush_dcache_range((ulong
)txre
,
1152 (ulong
)txre
+ MPSC_TXRE_SIZE
);
1159 mpsc_copy_tx_data(struct mpsc_port_info
*pi
)
1161 struct circ_buf
*xmit
= &pi
->port
.info
->xmit
;
1165 /* Make sure the desc ring isn't full */
1166 while (CIRC_CNT(pi
->txr_head
, pi
->txr_tail
, MPSC_TXR_ENTRIES
) <
1167 (MPSC_TXR_ENTRIES
- 1)) {
1168 if (pi
->port
.x_char
) {
1170 * Ideally, we should use the TCS field in
1171 * CHR_1 to put the x_char out immediately but
1172 * errata prevents us from being able to read
1173 * CHR_2 to know that its safe to write to
1174 * CHR_1. Instead, just put it in-band with
1175 * all the other Tx data.
1177 bp
= pi
->txb
+ (pi
->txr_head
* MPSC_TXBE_SIZE
);
1178 *bp
= pi
->port
.x_char
;
1179 pi
->port
.x_char
= 0;
1182 else if (!uart_circ_empty(xmit
) && !uart_tx_stopped(&pi
->port
)){
1183 i
= min((u32
) MPSC_TXBE_SIZE
,
1184 (u32
) uart_circ_chars_pending(xmit
));
1185 i
= min(i
, (u32
) CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
,
1187 bp
= pi
->txb
+ (pi
->txr_head
* MPSC_TXBE_SIZE
);
1188 memcpy(bp
, &xmit
->buf
[xmit
->tail
], i
);
1189 xmit
->tail
= (xmit
->tail
+ i
) & (UART_XMIT_SIZE
- 1);
1191 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
1192 uart_write_wakeup(&pi
->port
);
1194 else /* All tx data copied into ring bufs */
1197 dma_cache_sync(pi
->port
.dev
, (void *) bp
, MPSC_TXBE_SIZE
, DMA_BIDIRECTIONAL
);
1198 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1199 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1200 flush_dcache_range((ulong
)bp
,
1201 (ulong
)bp
+ MPSC_TXBE_SIZE
);
1203 mpsc_setup_tx_desc(pi
, i
, 1);
1205 /* Advance to next descriptor */
1206 pi
->txr_head
= (pi
->txr_head
+ 1) & (MPSC_TXR_ENTRIES
- 1);
1213 mpsc_tx_intr(struct mpsc_port_info
*pi
)
1215 struct mpsc_tx_desc
*txre
;
1217 unsigned long iflags
;
1219 spin_lock_irqsave(&pi
->tx_lock
, iflags
);
1221 if (!mpsc_sdma_tx_active(pi
)) {
1222 txre
= (struct mpsc_tx_desc
*)(pi
->txr
+
1223 (pi
->txr_tail
* MPSC_TXRE_SIZE
));
1225 dma_cache_sync(pi
->port
.dev
, (void *) txre
, MPSC_TXRE_SIZE
, DMA_FROM_DEVICE
);
1226 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1227 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1228 invalidate_dcache_range((ulong
)txre
,
1229 (ulong
)txre
+ MPSC_TXRE_SIZE
);
1232 while (!(be32_to_cpu(txre
->cmdstat
) & SDMA_DESC_CMDSTAT_O
)) {
1234 pi
->port
.icount
.tx
+= be16_to_cpu(txre
->bytecnt
);
1235 pi
->txr_tail
= (pi
->txr_tail
+1) & (MPSC_TXR_ENTRIES
-1);
1237 /* If no more data to tx, fall out of loop */
1238 if (pi
->txr_head
== pi
->txr_tail
)
1241 txre
= (struct mpsc_tx_desc
*)(pi
->txr
+
1242 (pi
->txr_tail
* MPSC_TXRE_SIZE
));
1243 dma_cache_sync(pi
->port
.dev
, (void *) txre
, MPSC_TXRE_SIZE
,
1245 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1246 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1247 invalidate_dcache_range((ulong
)txre
,
1248 (ulong
)txre
+ MPSC_TXRE_SIZE
);
1252 mpsc_copy_tx_data(pi
);
1253 mpsc_sdma_start_tx(pi
); /* start next desc if ready */
1256 spin_unlock_irqrestore(&pi
->tx_lock
, iflags
);
/* mpsc_sdma_intr: SDMA interrupt handler (registered via request_irq). */
1261 * This is the driver's interrupt handler. To avoid a race, we first clear
1262 * the interrupt, then handle any completed Rx/Tx descriptors. When done
1263 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
1266 mpsc_sdma_intr(int irq
, void *dev_id
)
1268 struct mpsc_port_info
*pi
= dev_id
;
1272 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi
->port
.line
);
/* Ack the interrupt first (see header comment), then service descriptors */
1274 spin_lock_irqsave(&pi
->port
.lock
, iflags
);
1275 mpsc_sdma_intr_ack(pi
);
/* NOTE(review): bodies of both branches lost in extraction — presumably
 * they restart the Rx/Tx engines when the helpers report them stopped */
1276 if (mpsc_rx_intr(pi
))
1278 if (mpsc_tx_intr(pi
))
1280 spin_unlock_irqrestore(&pi
->port
.lock
, iflags
);
1282 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi
->port
.line
);
1287 ******************************************************************************
1289 * serial_core.c Interface routines
1291 ******************************************************************************
1294 mpsc_tx_empty(struct uart_port
*port
)
1296 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1300 spin_lock_irqsave(&pi
->port
.lock
, iflags
);
1301 rc
= mpsc_sdma_tx_active(pi
) ? 0 : TIOCSER_TEMT
;
1302 spin_unlock_irqrestore(&pi
->port
.lock
, iflags
);
1308 mpsc_set_mctrl(struct uart_port
*port
, uint mctrl
)
1310 /* Have no way to set modem control lines AFAICT */
/*
 * mpsc_get_mctrl: report modem-control input state from MPSC_CHR_10
 * (or its mirror when register reads are unreliable on this chip).
 * NOTE(review): the condition lines selecting which status bits map to
 * CTS/CAR were lost in extraction — confirm bit masks against the
 * chip manual before relying on this fragment.
 */
1315 mpsc_get_mctrl(struct uart_port
*port
)
1317 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
/* Erratum workaround: prefer the mirrored copy over a readl() */
1320 status
= (pi
->mirror_regs
) ? pi
->MPSC_CHR_10_m
:
1321 readl(pi
->mpsc_base
+ MPSC_CHR_10
);
1325 mflags
|= TIOCM_CTS
;
1327 mflags
|= TIOCM_CAR
;
1329 return mflags
| TIOCM_DSR
; /* No way to tell if DSR asserted */
/*
 * mpsc_stop_tx: serial core callback to halt transmission.
 * NOTE(review): the remainder of the body (the call that actually stops
 * the Tx engine) was lost in extraction — presumably a freeze of the
 * MPSC transmitter; verify against the full source.
 */
1333 mpsc_stop_tx(struct uart_port
*port
)
1335 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1337 pr_debug("mpsc_stop_tx[%d]\n", port
->line
);
/*
 * mpsc_start_tx: serial core callback — move queued uart data into the
 * Tx descriptor ring and (re)start the SDMA Tx engine, under tx_lock.
 * NOTE(review): one or two lines between the lock and the copy were lost
 * in extraction (likely an unfreeze of the transmitter) — confirm.
 */
1344 mpsc_start_tx(struct uart_port
*port
)
1346 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1347 unsigned long iflags
;
1349 spin_lock_irqsave(&pi
->tx_lock
, iflags
);
/* Stage pending bytes into the ring, then kick the engine */
1352 mpsc_copy_tx_data(pi
);
1353 mpsc_sdma_start_tx(pi
);
1355 spin_unlock_irqrestore(&pi
->tx_lock
, iflags
);
1357 pr_debug("mpsc_start_tx[%d]\n", port
->line
);
1362 mpsc_start_rx(struct mpsc_port_info
*pi
)
1364 pr_debug("mpsc_start_rx[%d]: Starting...\n", pi
->port
.line
);
1366 /* Issue a Receive Abort to clear any receive errors */
1367 writel(MPSC_CHR_2_RA
, pi
->mpsc_base
+ MPSC_CHR_2
);
1369 mpsc_enter_hunt(pi
);
1370 mpsc_sdma_cmd(pi
, SDMA_SDCM_ERD
);
1376 mpsc_stop_rx(struct uart_port
*port
)
1378 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1380 pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port
->line
);
1382 mpsc_sdma_cmd(pi
, SDMA_SDCM_AR
);
/*
 * mpsc_enable_ms - serial core callback to enable modem-status interrupts.
 * The MPSC has no modem-status change interrupt, so this does nothing.
 */
static void
mpsc_enable_ms(struct uart_port *port)
{
	return;	/* Not supported */
}
1393 mpsc_break_ctl(struct uart_port
*port
, int ctl
)
1395 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1399 v
= ctl
? 0x00ff0000 : 0;
1401 spin_lock_irqsave(&pi
->port
.lock
, flags
);
1402 if (pi
->mirror_regs
)
1403 pi
->MPSC_CHR_1_m
= v
;
1404 writel(v
, pi
->mpsc_base
+ MPSC_CHR_1
);
1405 spin_unlock_irqrestore(&pi
->port
.lock
, flags
);
/*
 * mpsc_startup: serial core callback to bring a port up — make the
 * controller ready, hook the SDMA IRQ, unmask its sources and hand the
 * Rx descriptor ring to the hardware.
 * NOTE(review): extraction lost the 'flag' initialization, the trailing
 * request_irq() arguments (including the IRQ name string), the error
 * branch and the return — verify against the full source.
 */
1411 mpsc_startup(struct uart_port
*port
)
1413 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1417 pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1418 port
->line
, pi
->port
.irq
);
1420 if ((rc
= mpsc_make_ready(pi
)) == 0) {
1421 /* Setup IRQ handler */
1422 mpsc_sdma_intr_ack(pi
);
1424 /* If irq's are shared, need to set flag */
1425 if (mpsc_ports
[0].port
.irq
== mpsc_ports
[1].port
.irq
)
1428 if (request_irq(pi
->port
.irq
, mpsc_sdma_intr
, flag
,
1430 printk(KERN_ERR
"MPSC: Can't get SDMA IRQ %d\n",
/* Unmask all four SDMA interrupt sources and install the Rx ring */
1433 mpsc_sdma_intr_unmask(pi
, 0xf);
1434 mpsc_sdma_set_rx_ring(pi
, (struct mpsc_rx_desc
*)(pi
->rxr_p
+
1435 (pi
->rxr_posn
* MPSC_RXRE_SIZE
)));
/*
 * mpsc_shutdown: serial core callback to take a port down and release
 * its interrupt line.
 * NOTE(review): one body line before free_irq() was lost in extraction
 * (presumably a stop of the SDMA engines) — confirm against full source.
 */
1442 mpsc_shutdown(struct uart_port
*port
)
1444 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1446 pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port
->line
);
/* 'pi' was the dev_id passed to request_irq() in mpsc_startup() */
1449 free_irq(pi
->port
.irq
, pi
);
/*
 * mpsc_set_termios: apply termios settings — character length, stop bits,
 * parity and baud rate — then rebuild the read/ignore status masks used
 * when classifying completed Rx descriptors.
 * NOTE(review): extraction dropped the switch 'case' labels, several
 * 'else' lines, the CREAD handling and the Rx restart at the end; code
 * tokens are preserved untouched.
 */
1454 mpsc_set_termios(struct uart_port
*port
, struct ktermios
*termios
,
1455 struct ktermios
*old
)
1457 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1460 u32 chr_bits
, stop_bits
, par
;
/* Remember the raw termios flags for later Rx processing */
1462 pi
->c_iflag
= termios
->c_iflag
;
1463 pi
->c_cflag
= termios
->c_cflag
;
/* Character size -> MPSC character-length code (case labels lost) */
1465 switch (termios
->c_cflag
& CSIZE
) {
1467 chr_bits
= MPSC_MPCR_CL_5
;
1470 chr_bits
= MPSC_MPCR_CL_6
;
1473 chr_bits
= MPSC_MPCR_CL_7
;
1477 chr_bits
= MPSC_MPCR_CL_8
;
1481 if (termios
->c_cflag
& CSTOPB
)
1482 stop_bits
= MPSC_MPCR_SBL_2
;
1484 stop_bits
= MPSC_MPCR_SBL_1
;
/* Default to even parity; refine from PARENB/PARODD/CMSPAR below */
1486 par
= MPSC_CHR_2_PAR_EVEN
;
1487 if (termios
->c_cflag
& PARENB
)
1488 if (termios
->c_cflag
& PARODD
)
1489 par
= MPSC_CHR_2_PAR_ODD
;
1491 if (termios
->c_cflag
& CMSPAR
) {
1492 if (termios
->c_cflag
& PARODD
)
1493 par
= MPSC_CHR_2_PAR_MARK
;
1495 par
= MPSC_CHR_2_PAR_SPACE
;
1499 baud
= uart_get_baud_rate(port
, termios
, old
, 0, port
->uartclk
);
1501 spin_lock_irqsave(&pi
->port
.lock
, flags
);
1503 uart_update_timeout(port
, termios
->c_cflag
, baud
);
/* Program the hardware with the decoded settings */
1505 mpsc_set_char_length(pi
, chr_bits
);
1506 mpsc_set_stop_bit_length(pi
, stop_bits
);
1507 mpsc_set_parity(pi
, par
);
1508 mpsc_set_baudrate(pi
, baud
);
1510 /* Characters/events to read */
1512 pi
->port
.read_status_mask
= SDMA_DESC_CMDSTAT_OR
;
1514 if (termios
->c_iflag
& INPCK
)
1515 pi
->port
.read_status_mask
|= SDMA_DESC_CMDSTAT_PE
|
1516 SDMA_DESC_CMDSTAT_FR
;
1518 if (termios
->c_iflag
& (BRKINT
| PARMRK
))
1519 pi
->port
.read_status_mask
|= SDMA_DESC_CMDSTAT_BR
;
1521 /* Characters/events to ignore */
1522 pi
->port
.ignore_status_mask
= 0;
1524 if (termios
->c_iflag
& IGNPAR
)
1525 pi
->port
.ignore_status_mask
|= SDMA_DESC_CMDSTAT_PE
|
1526 SDMA_DESC_CMDSTAT_FR
;
1528 if (termios
->c_iflag
& IGNBRK
) {
1529 pi
->port
.ignore_status_mask
|= SDMA_DESC_CMDSTAT_BR
;
/* Overruns are only ignorable together with IGNPAR (POSIX semantics) */
1531 if (termios
->c_iflag
& IGNPAR
)
1532 pi
->port
.ignore_status_mask
|= SDMA_DESC_CMDSTAT_OR
;
1535 /* Ignore all chars if CREAD not set */
1536 if (!(termios
->c_cflag
& CREAD
))
1541 spin_unlock_irqrestore(&pi
->port
.lock
, flags
);
1546 mpsc_type(struct uart_port
*port
)
1548 pr_debug("mpsc_type[%d]: port type: %s\n", port
->line
,MPSC_DRIVER_NAME
);
1549 return MPSC_DRIVER_NAME
;
/*
 * mpsc_request_port - serial core callback to claim port resources.
 * Register regions are already claimed at probe time, so just succeed.
 */
static int
mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}
/*
 * mpsc_release_port: serial core callback — tear down the descriptor
 * rings and free their backing memory.
 * NOTE(review): a guard condition around the teardown (and a flag reset)
 * appears to have been lost in extraction — confirm whether these calls
 * are conditional on the port having been made ready.
 */
1560 mpsc_release_port(struct uart_port
*port
)
1562 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1565 mpsc_uninit_rings(pi
);
1566 mpsc_free_ring_mem(pi
);
/*
 * mpsc_config_port - serial core callback for autoconfiguration.
 * Nothing to probe: the port is fully described by platform data.
 */
static void
mpsc_config_port(struct uart_port *port, int flags)
{
}
1580 mpsc_verify_port(struct uart_port
*port
, struct serial_struct
*ser
)
1582 struct mpsc_port_info
*pi
= (struct mpsc_port_info
*)port
;
1585 pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi
->port
.line
);
1587 if (ser
->type
!= PORT_UNKNOWN
&& ser
->type
!= PORT_MPSC
)
1589 else if (pi
->port
.irq
!= ser
->irq
)
1591 else if (ser
->io_type
!= SERIAL_IO_MEM
)
1593 else if (pi
->port
.uartclk
/ 16 != ser
->baud_base
) /* Not sure */
1595 else if ((void *)pi
->port
.mapbase
!= ser
->iomem_base
)
1597 else if (pi
->port
.iobase
!= ser
->port
)
1599 else if (ser
->hub6
!= 0)
1605 static struct uart_ops mpsc_pops
= {
1606 .tx_empty
= mpsc_tx_empty
,
1607 .set_mctrl
= mpsc_set_mctrl
,
1608 .get_mctrl
= mpsc_get_mctrl
,
1609 .stop_tx
= mpsc_stop_tx
,
1610 .start_tx
= mpsc_start_tx
,
1611 .stop_rx
= mpsc_stop_rx
,
1612 .enable_ms
= mpsc_enable_ms
,
1613 .break_ctl
= mpsc_break_ctl
,
1614 .startup
= mpsc_startup
,
1615 .shutdown
= mpsc_shutdown
,
1616 .set_termios
= mpsc_set_termios
,
1618 .release_port
= mpsc_release_port
,
1619 .request_port
= mpsc_request_port
,
1620 .config_port
= mpsc_config_port
,
1621 .verify_port
= mpsc_verify_port
,
1625 ******************************************************************************
1627 * Console Interface Routines
1629 ******************************************************************************
1632 #ifdef CONFIG_SERIAL_MPSC_CONSOLE
/*
 * mpsc_console_write: polled console output — drains the Tx ring, then
 * copies 's' into Tx buffers one MPSC_TXBE_SIZE chunk at a time, adding
 * '\r' after each '\n', and busy-waits for each descriptor to complete.
 * NOTE(review): extraction lost the inner copy loop's bounds/add_cr
 * handling and the busy-wait bodies; code tokens are left untouched.
 */
1634 mpsc_console_write(struct console
*co
, const char *s
, uint count
)
1636 struct mpsc_port_info
*pi
= &mpsc_ports
[co
->index
];
1637 u8
*bp
, *dp
, add_cr
= 0;
1639 unsigned long iflags
;
/* Serializes with mpsc_start_tx/mpsc_tx_intr on the Tx ring */
1641 spin_lock_irqsave(&pi
->tx_lock
, iflags
);
/* First drain anything already queued in the ring */
1643 while (pi
->txr_head
!= pi
->txr_tail
) {
1644 while (mpsc_sdma_tx_active(pi
))
1646 mpsc_sdma_intr_ack(pi
);
1650 while (mpsc_sdma_tx_active(pi
))
/* 'bp' = start of this chunk's Tx buffer; 'dp' walks it while copying */
1654 bp
= dp
= pi
->txb
+ (pi
->txr_head
* MPSC_TXBE_SIZE
);
1656 for (i
= 0; i
< MPSC_TXBE_SIZE
; i
++) {
1667 if (*(s
++) == '\n') { /* add '\r' after '\n' */
/* Push the filled buffer to memory so the SDMA sees current data */
1676 dma_cache_sync(pi
->port
.dev
, (void *) bp
, MPSC_TXBE_SIZE
, DMA_BIDIRECTIONAL
);
1677 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1678 if (pi
->cache_mgmt
) /* GT642[46]0 Res #COMM-2 */
1679 flush_dcache_range((ulong
)bp
,
1680 (ulong
)bp
+ MPSC_TXBE_SIZE
);
/* Hand descriptor 'i' to the hardware and advance the ring head */
1682 mpsc_setup_tx_desc(pi
, i
, 0);
1683 pi
->txr_head
= (pi
->txr_head
+ 1) & (MPSC_TXR_ENTRIES
- 1);
1684 mpsc_sdma_start_tx(pi
);
1686 while (mpsc_sdma_tx_active(pi
))
1689 pi
->txr_tail
= (pi
->txr_tail
+ 1) & (MPSC_TXR_ENTRIES
- 1);
1692 spin_unlock_irqrestore(&pi
->tx_lock
, iflags
);
/*
 * mpsc_console_setup: console 'setup' hook — pick the port for co->index,
 * start from the platform defaults, override from the command-line
 * 'options' string, and hand the result to the serial core.
 * NOTE(review): the out-of-range index handling and any readiness checks
 * between lines 1704 and 1720 were lost in extraction — confirm.
 */
1697 mpsc_console_setup(struct console
*co
, char *options
)
1699 struct mpsc_port_info
*pi
;
1700 int baud
, bits
, parity
, flow
;
1702 pr_debug("mpsc_console_setup[%d]: options: %s\n", co
->index
, options
);
1704 if (co
->index
>= MPSC_NUM_CTLRS
)
1707 pi
= &mpsc_ports
[co
->index
];
/* Seed settings from the platform defaults captured at probe time */
1709 baud
= pi
->default_baud
;
1710 bits
= pi
->default_bits
;
1711 parity
= pi
->default_parity
;
1712 flow
= pi
->default_flow
;
1717 spin_lock_init(&pi
->port
.lock
); /* Temporary fix--copied from 8250.c */
/* A "baud,parity,bits,flow" option string overrides the defaults */
1720 uart_parse_options(options
, &baud
, &parity
, &bits
, &flow
);
1722 return uart_set_options(&pi
->port
, co
, baud
, parity
, bits
, flow
);
1725 static struct console mpsc_console
= {
1726 .name
= MPSC_DEV_NAME
,
1727 .write
= mpsc_console_write
,
1728 .device
= uart_console_device
,
1729 .setup
= mpsc_console_setup
,
1730 .flags
= CON_PRINTBUFFER
,
1736 mpsc_late_console_init(void)
1738 pr_debug("mpsc_late_console_init: Enter\n");
1740 if (!(mpsc_console
.flags
& CON_ENABLED
))
1741 register_console(&mpsc_console
);
1745 late_initcall(mpsc_late_console_init
);
/* Console compiled in: point uart_driver.cons at our console struct */
1747 #define MPSC_CONSOLE &mpsc_console
/* Console support not configured: no console attached to the driver */
1749 #define MPSC_CONSOLE NULL
1752 ******************************************************************************
1754 * Dummy Platform Driver to extract & map shared register regions
1756 ******************************************************************************
1759 mpsc_resource_err(char *s
)
1761 printk(KERN_WARNING
"MPSC: Platform device resource error in %s\n", s
);
1766 mpsc_shared_map_regs(struct platform_device
*pd
)
1770 if ((r
= platform_get_resource(pd
, IORESOURCE_MEM
,
1771 MPSC_ROUTING_BASE_ORDER
)) && request_mem_region(r
->start
,
1772 MPSC_ROUTING_REG_BLOCK_SIZE
, "mpsc_routing_regs")) {
1774 mpsc_shared_regs
.mpsc_routing_base
= ioremap(r
->start
,
1775 MPSC_ROUTING_REG_BLOCK_SIZE
);
1776 mpsc_shared_regs
.mpsc_routing_base_p
= r
->start
;
1779 mpsc_resource_err("MPSC routing base");
1783 if ((r
= platform_get_resource(pd
, IORESOURCE_MEM
,
1784 MPSC_SDMA_INTR_BASE_ORDER
)) && request_mem_region(r
->start
,
1785 MPSC_SDMA_INTR_REG_BLOCK_SIZE
, "sdma_intr_regs")) {
1787 mpsc_shared_regs
.sdma_intr_base
= ioremap(r
->start
,
1788 MPSC_SDMA_INTR_REG_BLOCK_SIZE
);
1789 mpsc_shared_regs
.sdma_intr_base_p
= r
->start
;
1792 iounmap(mpsc_shared_regs
.mpsc_routing_base
);
1793 release_mem_region(mpsc_shared_regs
.mpsc_routing_base_p
,
1794 MPSC_ROUTING_REG_BLOCK_SIZE
);
1795 mpsc_resource_err("SDMA intr base");
1803 mpsc_shared_unmap_regs(void)
1805 if (!mpsc_shared_regs
.mpsc_routing_base
) {
1806 iounmap(mpsc_shared_regs
.mpsc_routing_base
);
1807 release_mem_region(mpsc_shared_regs
.mpsc_routing_base_p
,
1808 MPSC_ROUTING_REG_BLOCK_SIZE
);
1810 if (!mpsc_shared_regs
.sdma_intr_base
) {
1811 iounmap(mpsc_shared_regs
.sdma_intr_base
);
1812 release_mem_region(mpsc_shared_regs
.sdma_intr_base_p
,
1813 MPSC_SDMA_INTR_REG_BLOCK_SIZE
);
1816 mpsc_shared_regs
.mpsc_routing_base
= NULL
;
1817 mpsc_shared_regs
.sdma_intr_base
= NULL
;
1819 mpsc_shared_regs
.mpsc_routing_base_p
= 0;
1820 mpsc_shared_regs
.sdma_intr_base_p
= 0;
/*
 * mpsc_shared_drv_probe: probe for the dummy "shared" platform device —
 * map the shared register blocks and seed the mirrored register values
 * from platform data (for chips whose registers cannot be read back).
 * NOTE(review): the 'rc' initialization, any dev->id guard and the
 * return were lost in extraction — confirm against the full source.
 */
1826 mpsc_shared_drv_probe(struct platform_device
*dev
)
1828 struct mpsc_shared_pdata
*pdata
;
1832 if (!(rc
= mpsc_shared_map_regs(dev
))) {
1833 pdata
= (struct mpsc_shared_pdata
*)dev
->dev
.platform_data
;
/* Initial mirror values for the erratum workaround (see file header) */
1835 mpsc_shared_regs
.MPSC_MRR_m
= pdata
->mrr_val
;
1836 mpsc_shared_regs
.MPSC_RCRR_m
= pdata
->rcrr_val
;
1837 mpsc_shared_regs
.MPSC_TCRR_m
= pdata
->tcrr_val
;
1838 mpsc_shared_regs
.SDMA_INTR_CAUSE_m
=
1839 pdata
->intr_cause_val
;
1840 mpsc_shared_regs
.SDMA_INTR_MASK_m
=
1841 pdata
->intr_mask_val
;
/*
 * mpsc_shared_drv_remove: remove hook for the shared platform device —
 * unmap the shared register windows and clear the mirrored values.
 * NOTE(review): a guard (likely on dev->id) and the return value were
 * lost in extraction.
 */
1851 mpsc_shared_drv_remove(struct platform_device
*dev
)
1856 mpsc_shared_unmap_regs();
/* Drop stale mirror state so a re-probe repopulates from platform data */
1857 mpsc_shared_regs
.MPSC_MRR_m
= 0;
1858 mpsc_shared_regs
.MPSC_RCRR_m
= 0;
1859 mpsc_shared_regs
.MPSC_TCRR_m
= 0;
1860 mpsc_shared_regs
.SDMA_INTR_CAUSE_m
= 0;
1861 mpsc_shared_regs
.SDMA_INTR_MASK_m
= 0;
1868 static struct platform_driver mpsc_shared_driver
= {
1869 .probe
= mpsc_shared_drv_probe
,
1870 .remove
= mpsc_shared_drv_remove
,
1872 .name
= MPSC_SHARED_NAME
,
1877 ******************************************************************************
1879 * Driver Interface Routines
1881 ******************************************************************************
1883 static struct uart_driver mpsc_reg
= {
1884 .owner
= THIS_MODULE
,
1885 .driver_name
= MPSC_DRIVER_NAME
,
1886 .dev_name
= MPSC_DEV_NAME
,
1887 .major
= MPSC_MAJOR
,
1888 .minor
= MPSC_MINOR_START
,
1889 .nr
= MPSC_NUM_CTLRS
,
1890 .cons
= MPSC_CONSOLE
,
1894 mpsc_drv_map_regs(struct mpsc_port_info
*pi
, struct platform_device
*pd
)
1898 if ((r
= platform_get_resource(pd
, IORESOURCE_MEM
, MPSC_BASE_ORDER
)) &&
1899 request_mem_region(r
->start
, MPSC_REG_BLOCK_SIZE
, "mpsc_regs")){
1901 pi
->mpsc_base
= ioremap(r
->start
, MPSC_REG_BLOCK_SIZE
);
1902 pi
->mpsc_base_p
= r
->start
;
1905 mpsc_resource_err("MPSC base");
1909 if ((r
= platform_get_resource(pd
, IORESOURCE_MEM
,
1910 MPSC_SDMA_BASE_ORDER
)) && request_mem_region(r
->start
,
1911 MPSC_SDMA_REG_BLOCK_SIZE
, "sdma_regs")) {
1913 pi
->sdma_base
= ioremap(r
->start
,MPSC_SDMA_REG_BLOCK_SIZE
);
1914 pi
->sdma_base_p
= r
->start
;
1917 mpsc_resource_err("SDMA base");
1918 if (pi
->mpsc_base
) {
1919 iounmap(pi
->mpsc_base
);
1920 pi
->mpsc_base
= NULL
;
1925 if ((r
= platform_get_resource(pd
,IORESOURCE_MEM
,MPSC_BRG_BASE_ORDER
))
1926 && request_mem_region(r
->start
, MPSC_BRG_REG_BLOCK_SIZE
,
1929 pi
->brg_base
= ioremap(r
->start
, MPSC_BRG_REG_BLOCK_SIZE
);
1930 pi
->brg_base_p
= r
->start
;
1933 mpsc_resource_err("BRG base");
1934 if (pi
->mpsc_base
) {
1935 iounmap(pi
->mpsc_base
);
1936 pi
->mpsc_base
= NULL
;
1938 if (pi
->sdma_base
) {
1939 iounmap(pi
->sdma_base
);
1940 pi
->sdma_base
= NULL
;
1949 mpsc_drv_unmap_regs(struct mpsc_port_info
*pi
)
1951 if (!pi
->mpsc_base
) {
1952 iounmap(pi
->mpsc_base
);
1953 release_mem_region(pi
->mpsc_base_p
, MPSC_REG_BLOCK_SIZE
);
1955 if (!pi
->sdma_base
) {
1956 iounmap(pi
->sdma_base
);
1957 release_mem_region(pi
->sdma_base_p
, MPSC_SDMA_REG_BLOCK_SIZE
);
1959 if (!pi
->brg_base
) {
1960 iounmap(pi
->brg_base
);
1961 release_mem_region(pi
->brg_base_p
, MPSC_BRG_REG_BLOCK_SIZE
);
1964 pi
->mpsc_base
= NULL
;
1965 pi
->sdma_base
= NULL
;
1966 pi
->brg_base
= NULL
;
1968 pi
->mpsc_base_p
= 0;
1969 pi
->sdma_base_p
= 0;
1976 mpsc_drv_get_platform_data(struct mpsc_port_info
*pi
,
1977 struct platform_device
*pd
, int num
)
1979 struct mpsc_pdata
*pdata
;
1981 pdata
= (struct mpsc_pdata
*)pd
->dev
.platform_data
;
1983 pi
->port
.uartclk
= pdata
->brg_clk_freq
;
1984 pi
->port
.iotype
= UPIO_MEM
;
1985 pi
->port
.line
= num
;
1986 pi
->port
.type
= PORT_MPSC
;
1987 pi
->port
.fifosize
= MPSC_TXBE_SIZE
;
1988 pi
->port
.membase
= pi
->mpsc_base
;
1989 pi
->port
.mapbase
= (ulong
)pi
->mpsc_base
;
1990 pi
->port
.ops
= &mpsc_pops
;
1992 pi
->mirror_regs
= pdata
->mirror_regs
;
1993 pi
->cache_mgmt
= pdata
->cache_mgmt
;
1994 pi
->brg_can_tune
= pdata
->brg_can_tune
;
1995 pi
->brg_clk_src
= pdata
->brg_clk_src
;
1996 pi
->mpsc_max_idle
= pdata
->max_idle
;
1997 pi
->default_baud
= pdata
->default_baud
;
1998 pi
->default_bits
= pdata
->default_bits
;
1999 pi
->default_parity
= pdata
->default_parity
;
2000 pi
->default_flow
= pdata
->default_flow
;
2002 /* Initial values of mirrored regs */
2003 pi
->MPSC_CHR_1_m
= pdata
->chr_1_val
;
2004 pi
->MPSC_CHR_2_m
= pdata
->chr_2_val
;
2005 pi
->MPSC_CHR_10_m
= pdata
->chr_10_val
;
2006 pi
->MPSC_MPCR_m
= pdata
->mpcr_val
;
2007 pi
->BRG_BCR_m
= pdata
->bcr_val
;
2009 pi
->shared_regs
= &mpsc_shared_regs
;
2011 pi
->port
.irq
= platform_get_irq(pd
, 0);
/*
 * mpsc_drv_probe: per-port probe — map registers, pull platform data,
 * make the controller ready and register the port with the serial core,
 * unmapping registers on any failure.
 * NOTE(review): the 'rc' initialization, the uart_add_one_port argument
 * continuation and the error-branch structure were lost in extraction.
 */
2017 mpsc_drv_probe(struct platform_device
*dev
)
2019 struct mpsc_port_info
*pi
;
2022 pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev
->id
);
/* dev->id selects the static per-port slot (0 or 1) */
2024 if (dev
->id
< MPSC_NUM_CTLRS
) {
2025 pi
= &mpsc_ports
[dev
->id
];
2027 if (!(rc
= mpsc_drv_map_regs(pi
, dev
))) {
2028 mpsc_drv_get_platform_data(pi
, dev
, dev
->id
);
2030 if (!(rc
= mpsc_make_ready(pi
))) {
2031 spin_lock_init(&pi
->tx_lock
);
2032 if (!(rc
= uart_add_one_port(&mpsc_reg
,
/* Failure paths: release the port and unmap the register windows */
2037 (struct uart_port
*)pi
);
2038 mpsc_drv_unmap_regs(pi
);
2042 mpsc_drv_unmap_regs(pi
);
2050 mpsc_drv_remove(struct platform_device
*dev
)
2052 pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev
->id
);
2054 if (dev
->id
< MPSC_NUM_CTLRS
) {
2055 uart_remove_one_port(&mpsc_reg
, &mpsc_ports
[dev
->id
].port
);
2056 mpsc_release_port((struct uart_port
*)&mpsc_ports
[dev
->id
].port
);
2057 mpsc_drv_unmap_regs(&mpsc_ports
[dev
->id
]);
2064 static struct platform_driver mpsc_driver
= {
2065 .probe
= mpsc_drv_probe
,
2066 .remove
= mpsc_drv_remove
,
2068 .name
= MPSC_CTLR_NAME
,
/*
 * Body of mpsc_drv_init (module init): zero the driver state, register
 * the uart driver and both platform drivers, unwinding registrations in
 * reverse order when a later step fails.
 * NOTE(review): the function signature, 'rc' declaration and final
 * return were lost in extraction.
 */
2077 printk(KERN_INFO
"Serial: MPSC driver $Revision: 1.00 $\n");
2079 memset(mpsc_ports
, 0, sizeof(mpsc_ports
));
2080 memset(&mpsc_shared_regs
, 0, sizeof(mpsc_shared_regs
));
2082 if (!(rc
= uart_register_driver(&mpsc_reg
))) {
2083 if (!(rc
= platform_driver_register(&mpsc_shared_driver
))) {
2084 if ((rc
= platform_driver_register(&mpsc_driver
))) {
/* Third step failed: unwind the second and first registrations */
2085 platform_driver_unregister(&mpsc_shared_driver
);
2086 uart_unregister_driver(&mpsc_reg
);
/* Second step failed: unwind only the uart driver registration */
2090 uart_unregister_driver(&mpsc_reg
);
/*
 * Body of mpsc_drv_exit (module exit): unregister both platform drivers
 * and the uart driver in reverse init order, then clear driver state.
 * NOTE(review): the function signature was lost in extraction.
 */
2100 platform_driver_unregister(&mpsc_driver
);
2101 platform_driver_unregister(&mpsc_shared_driver
);
2102 uart_unregister_driver(&mpsc_reg
);
2103 memset(mpsc_ports
, 0, sizeof(mpsc_ports
));
2104 memset(&mpsc_shared_regs
, 0, sizeof(mpsc_shared_regs
));
/* Module entry/exit hooks and standard module metadata. */
2108 module_init(mpsc_drv_init
);
2109 module_exit(mpsc_drv_exit
);
2111 MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
2112 MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver $Revision: 1.00 $");
2113 MODULE_VERSION(MPSC_VERSION
);
2114 MODULE_LICENSE("GPL");
/* Ties the MPSC char major to this module for on-demand loading */
2115 MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR
);