/*
 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
 * GT64260, MV64340, MV64360, GT96100, ... ).
 *
 * Author: Mark A. Greer <mgreer@mvista.com>
 *
 * Based on an old MPSC driver that was in the linuxppc tree.  It appears to
 * have been created by Chris Zankel (formerly of MontaVista) but there
 * is no proper Copyright so I'm not sure.  Apparently, parts were also
 * taken from PPCBoot (now U-Boot).  Also based on drivers/serial/8250.c.
 *
 * 2004 (c) MontaVista, Software, Inc.  This file is licensed under
 * the terms of the GNU General Public License version 2.  This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
/*
 * The MPSC interface is much like a typical network controller's interface.
 * That is, you set up separate rings of descriptors for transmitting and
 * receiving data.  There is also a pool of buffers (one buffer per
 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
 * out of.
 *
 * The MPSC requires two other controllers to be able to work.  The Baud Rate
 * Generator (BRG) provides a clock at programmable frequencies which
 * determines the baud rate.  The Serial DMA Controller (SDMA) takes incoming
 * data from the MPSC and DMA's it into memory or DMA's outgoing data and
 * passes it to the MPSC.  It is actually the SDMA interrupt that the driver
 * uses to keep the transmit and receive "engines" going (i.e., indicate data
 * has been transmitted or received).
 *
 * NOTES:
 *
 * 1) Some chips have an erratum where several regs cannot be
 * read.  To work around that, we keep a local copy of those regs in
 * the driver's data structures ('mpsc_port_info' and 'mpsc_shared_regs').
 *
 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
 * accesses system mem with coherency enabled.  For that reason, the driver
 * assumes that coherency for that ctlr has been disabled.  This means
 * that when in a cache coherent system, the driver has to manually manage
 * the data cache on the areas that it touches because the dma_* macros are
 * basically no-ops.
 *
 * 3) There is an erratum (on PPC) where you can't use the instruction to do
 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
 *
 * 4) AFAICT, hardware flow control isn't supported by the controller. --MAG
 */
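/*
 * Illustrative sketch only (not part of the original driver): the pattern
 * that notes 2) and 3) above lead to throughout this file.  Descriptors and
 * buffers are handed to the SDMA with dma_cache_sync(), and on GT642[46]0
 * parts that need manual cache management the dcache range is also flushed
 * or invalidated by hand.  The helper name below is made up; 'pi',
 * 'cache_mgmt', and MPSC_TXRE_SIZE are the driver's own.
 *
 *	static void example_sync_tx_desc(struct mpsc_port_info *pi, void *txre)
 *	{
 *		dma_cache_sync(pi->port.dev, txre, MPSC_TXRE_SIZE,
 *				DMA_BIDIRECTIONAL);
 *	#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 *		if (pi->cache_mgmt)	// GT642[46]0 Res #COMM-2
 *			flush_dcache_range((ulong)txre,
 *					(ulong)txre + MPSC_TXRE_SIZE);
 *	#endif
 *	}
 */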
#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mv643xx.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>

#define MPSC_NUM_CTLRS		2
/*
 * Descriptors and buffers must be cache line aligned.
 * Buffer lengths must be multiples of the cache line size.
 * Number of Tx & Rx descriptors must be powers of 2.
 */
#define MPSC_RXR_ENTRIES	32
#define MPSC_RXRE_SIZE		dma_get_cache_alignment()
#define MPSC_RXR_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
#define MPSC_RXBE_SIZE		dma_get_cache_alignment()
#define MPSC_RXB_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)

#define MPSC_TXR_ENTRIES	32
#define MPSC_TXRE_SIZE		dma_get_cache_alignment()
#define MPSC_TXR_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
#define MPSC_TXBE_SIZE		dma_get_cache_alignment()
#define MPSC_TXB_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)

#define MPSC_DMA_ALLOC_SIZE	(MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
		+ MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)
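/*
 * Illustrative layout sketch (not from the original source): after rounding
 * up to the next cache line boundary, mpsc_init_rings() below carves the
 * single MPSC_DMA_ALLOC_SIZE allocation up in this order:
 *
 *	[ Rx desc ring | Rx buffers  | Tx desc ring | Tx buffers  ]
 *	 MPSC_RXR_SIZE  MPSC_RXB_SIZE  MPSC_TXR_SIZE  MPSC_TXB_SIZE
 */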
/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
struct mpsc_rx_desc {
	u16 bufsize;
	u16 bytecnt;
	u32 cmdstat;
	u32 link;
	u32 buf_ptr;
} __attribute((packed));

struct mpsc_tx_desc {
	u16 bytecnt;
	u16 shadow;
	u32 cmdstat;
	u32 link;
	u32 buf_ptr;
} __attribute((packed));
/*
 * Some regs that have the erratum that you can't read them are shared
 * between the two MPSC controllers.  This struct contains those shared regs.
 */
struct mpsc_shared_regs {
	phys_addr_t mpsc_routing_base_p;
	phys_addr_t sdma_intr_base_p;

	void __iomem *mpsc_routing_base;
	void __iomem *sdma_intr_base;

	u32 MPSC_MRR_m;
	u32 MPSC_RCRR_m;
	u32 MPSC_TCRR_m;
	u32 SDMA_INTR_CAUSE_m;
	u32 SDMA_INTR_MASK_m;
};
/* The main driver data structure */
struct mpsc_port_info {
	struct uart_port port;	/* Overlay uart_port structure */

	/* Internal driver state for this ctlr */
	u8 ready;
	u8 rcv_data;
	tcflag_t c_iflag;	/* save termios->c_iflag */
	tcflag_t c_cflag;	/* save termios->c_cflag */

	/* Info passed in from platform */
	u8 mirror_regs;		/* Need to mirror regs? */
	u8 cache_mgmt;		/* Need manual cache mgmt? */
	u8 brg_can_tune;	/* BRG has baud tuning? */
	u32 brg_clk_src;
	u16 mpsc_max_idle;
	int default_baud;
	int default_bits;
	int default_parity;
	int default_flow;

	/* Physical addresses of various blocks of registers (from platform) */
	phys_addr_t mpsc_base_p;
	phys_addr_t sdma_base_p;
	phys_addr_t brg_base_p;

	/* Virtual addresses of various blocks of registers (from platform) */
	void __iomem *mpsc_base;
	void __iomem *sdma_base;
	void __iomem *brg_base;

	/* Descriptor ring and buffer allocations */
	void *dma_region;
	dma_addr_t dma_region_p;

	dma_addr_t rxr;		/* Rx descriptor ring */
	dma_addr_t rxr_p;	/* Phys addr of rxr */
	u8 *rxb;		/* Rx Ring I/O buf */
	u8 *rxb_p;		/* Phys addr of rxb */
	u32 rxr_posn;		/* First desc w/ Rx data */

	dma_addr_t txr;		/* Tx descriptor ring */
	dma_addr_t txr_p;	/* Phys addr of txr */
	u8 *txb;		/* Tx Ring I/O buf */
	u8 *txb_p;		/* Phys addr of txb */
	int txr_head;		/* Where new data goes */
	int txr_tail;		/* Where sent data comes off */
	spinlock_t tx_lock;	/* transmit lock */

	/* Mirrored values of regs we can't read (if 'mirror_regs' set) */
	u32 MPSC_MPCR_m;
	u32 MPSC_CHR_1_m;
	u32 MPSC_CHR_2_m;
	u32 MPSC_CHR_10_m;
	u32 BRG_BCR_m;

	struct mpsc_shared_regs *shared_regs;
};
/* Hooks to platform-specific code */
int mpsc_platform_register_driver(void);
void mpsc_platform_unregister_driver(void);

/* Hooks back in to mpsc common to be called by platform-specific code */
struct mpsc_port_info *mpsc_device_probe(int index);
struct mpsc_port_info *mpsc_device_remove(int index);
/* Main MPSC Configuration Register Offsets */
#define MPSC_MMCRL		0x0000
#define MPSC_MMCRH		0x0004
#define MPSC_MPCR		0x0008
#define MPSC_CHR_1		0x000c
#define MPSC_CHR_2		0x0010
#define MPSC_CHR_3		0x0014
#define MPSC_CHR_4		0x0018
#define MPSC_CHR_5		0x001c
#define MPSC_CHR_6		0x0020
#define MPSC_CHR_7		0x0024
#define MPSC_CHR_8		0x0028
#define MPSC_CHR_9		0x002c
#define MPSC_CHR_10		0x0030
#define MPSC_CHR_11		0x0034

#define MPSC_MPCR_FRZ		(1 << 9)
#define MPSC_MPCR_CL_5		0
#define MPSC_MPCR_CL_6		1
#define MPSC_MPCR_CL_7		2
#define MPSC_MPCR_CL_8		3
#define MPSC_MPCR_SBL_1		0
#define MPSC_MPCR_SBL_2		1

#define MPSC_CHR_2_TEV		(1<<1)
#define MPSC_CHR_2_TA		(1<<7)
#define MPSC_CHR_2_TTCS		(1<<9)
#define MPSC_CHR_2_REV		(1<<17)
#define MPSC_CHR_2_RA		(1<<23)
#define MPSC_CHR_2_CRD		(1<<25)
#define MPSC_CHR_2_EH		(1<<31)
#define MPSC_CHR_2_PAR_ODD	0
#define MPSC_CHR_2_PAR_SPACE	1
#define MPSC_CHR_2_PAR_EVEN	2
#define MPSC_CHR_2_PAR_MARK	3

/* MPSC Signal Routing */
#define MPSC_MRR		0x0000
#define MPSC_RCRR		0x0004
#define MPSC_TCRR		0x0008

/* Serial DMA Controller Interface Registers */
#define SDMA_SDC		0x0000
#define SDMA_SDCM		0x0008
#define SDMA_RX_DESC		0x0800
#define SDMA_RX_BUF_PTR		0x0808
#define SDMA_SCRDP		0x0810
#define SDMA_TX_DESC		0x0c00
#define SDMA_SCTDP		0x0c10
#define SDMA_SFTDP		0x0c14

#define SDMA_DESC_CMDSTAT_PE	(1<<0)
#define SDMA_DESC_CMDSTAT_CDL	(1<<1)
#define SDMA_DESC_CMDSTAT_FR	(1<<3)
#define SDMA_DESC_CMDSTAT_OR	(1<<6)
#define SDMA_DESC_CMDSTAT_BR	(1<<9)
#define SDMA_DESC_CMDSTAT_MI	(1<<10)
#define SDMA_DESC_CMDSTAT_A	(1<<11)
#define SDMA_DESC_CMDSTAT_AM	(1<<12)
#define SDMA_DESC_CMDSTAT_CT	(1<<13)
#define SDMA_DESC_CMDSTAT_C	(1<<14)
#define SDMA_DESC_CMDSTAT_ES	(1<<15)
#define SDMA_DESC_CMDSTAT_L	(1<<16)
#define SDMA_DESC_CMDSTAT_F	(1<<17)
#define SDMA_DESC_CMDSTAT_P	(1<<18)
#define SDMA_DESC_CMDSTAT_EI	(1<<23)
#define SDMA_DESC_CMDSTAT_O	(1<<31)

#define SDMA_DESC_DFLT		(SDMA_DESC_CMDSTAT_O \
		| SDMA_DESC_CMDSTAT_EI)

#define SDMA_SDC_RFT		(1<<0)
#define SDMA_SDC_SFM		(1<<1)
#define SDMA_SDC_BLMR		(1<<6)
#define SDMA_SDC_BLMT		(1<<7)
#define SDMA_SDC_POVR		(1<<8)
#define SDMA_SDC_RIFB		(1<<9)

#define SDMA_SDCM_ERD		(1<<7)
#define SDMA_SDCM_AR		(1<<15)
#define SDMA_SDCM_STD		(1<<16)
#define SDMA_SDCM_TXD		(1<<23)
#define SDMA_SDCM_AT		(1<<31)

#define SDMA_0_CAUSE_RXBUF	(1<<0)
#define SDMA_0_CAUSE_RXERR	(1<<1)
#define SDMA_0_CAUSE_TXBUF	(1<<2)
#define SDMA_0_CAUSE_TXEND	(1<<3)
#define SDMA_1_CAUSE_RXBUF	(1<<8)
#define SDMA_1_CAUSE_RXERR	(1<<9)
#define SDMA_1_CAUSE_TXBUF	(1<<10)
#define SDMA_1_CAUSE_TXEND	(1<<11)

#define SDMA_CAUSE_RX_MASK	(SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
		| SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
#define SDMA_CAUSE_TX_MASK	(SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
		| SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)

/* SDMA Interrupt registers */
#define SDMA_INTR_CAUSE		0x0000
#define SDMA_INTR_MASK		0x0080

/* Baud Rate Generator Interface Registers */
#define BRG_BCR			0x0000
#define BRG_BTR			0x0004
/*
 * Define how this driver is known to the outside (we've been assigned a
 * range on the "Low-density serial ports" major).
 */
#define MPSC_MAJOR		204
#define MPSC_MINOR_START	44
#define MPSC_DRIVER_NAME	"MPSC"
#define MPSC_DEV_NAME		"ttyMM"
#define MPSC_VERSION		"1.00"
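/*
 * For illustration (derived from the values above, not new functionality):
 * with MPSC_NUM_CTLRS == 2 the two ports show up as ttyMM0 and ttyMM1,
 * i.e. character devices with major 204 and minors 44 and 45.
 */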
static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
static struct mpsc_shared_regs mpsc_shared_regs;
static struct uart_driver mpsc_reg;

static void mpsc_start_rx(struct mpsc_port_info *pi);
static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
static void mpsc_release_port(struct uart_port *port);
/*
 ******************************************************************************
 *
 * Baud Rate Generator Routines (BRG)
 *
 ******************************************************************************
 */

static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);

	if (pi->brg_can_tune)
		v &= ~(1 << 25);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);

	writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
		pi->brg_base + BRG_BTR);
}

static void mpsc_brg_enable(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v |= (1 << 16);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
}

static void mpsc_brg_disable(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v &= ~(1 << 16);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
}
/*
 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
 * However, the input clock is divided by 16 in the MPSC b/c of how
 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
 * calculation by 16 to account for that.  So the real calculation
 * that accounts for the way the mpsc is set up is:
 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
 */
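/*
 * Worked example (illustrative only; the 25 MHz BRG input clock is an
 * assumed value of pi->port.uartclk, not something defined by this driver):
 *	CDV = (25000000 / (9600 << 5)) - 1 = (25000000 / 307200) - 1 = 80
 * which yields an actual rate of 25000000 / ((80 + 1) * 2 * 16) ~= 9645 baud.
 */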
static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
{
	u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
	u32 v;

	mpsc_brg_disable(pi);
	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v = (v & 0xffff0000) | (cdv & 0xffff);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
	mpsc_brg_enable(pi);
}
/*
 ******************************************************************************
 *
 * Serial DMA Routines (SDMA)
 *
 ******************************************************************************
 */

static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
{
	u32 v;

	pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
		pi->port.line, burst_size);

	burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */

	if (burst_size < 2)
		v = 0x0;	/* 1 64-bit word */
	else if (burst_size < 4)
		v = 0x1;	/* 2 64-bit words */
	else if (burst_size < 8)
		v = 0x2;	/* 4 64-bit words */
	else
		v = 0x3;	/* 8 64-bit words */

	writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
		pi->sdma_base + SDMA_SDC);
}

static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
{
	pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
		burst_size);

	writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
		pi->sdma_base + SDMA_SDC);

	mpsc_sdma_burstsize(pi, burst_size);
}
static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
{
	u32 old, v;

	pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);

	old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
		readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;
	v &= ~mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = v;
	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	if (pi->port.line)
		old >>= 8;
	return old & 0xf;
}

static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
{
	u32 v;

	pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line, mask);

	v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m
		: readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;
	v |= mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = v;
	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
}

static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
	writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
			+ pi->port.line);
}
static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
		struct mpsc_rx_desc *rxre_p)
{
	pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
		pi->port.line, (u32)rxre_p);

	writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
}

static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
		struct mpsc_tx_desc *txre_p)
{
	writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
	writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
}

static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
{
	u32 v;

	v = readl(pi->sdma_base + SDMA_SDCM);
	if (val)
		v |= val;
	else
		v = 0;
	wmb();
	writel(v, pi->sdma_base + SDMA_SDCM);
	wmb();
}

static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
{
	return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
}
static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre, *txre_p;

	/* If tx isn't running & there's a desc ready to go, start it */
	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
			txre_p = (struct mpsc_tx_desc *)
				(pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));

			mpsc_sdma_set_tx_ring(pi, txre_p);
			mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
		}
	}
}

static void mpsc_sdma_stop(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);

	/* Abort any SDMA transfers */
	mpsc_sdma_cmd(pi, 0);
	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);

	/* Clear the SDMA current and first TX and RX pointers */
	mpsc_sdma_set_tx_ring(pi, NULL);
	mpsc_sdma_set_rx_ring(pi, NULL);

	/* Disable interrupts */
	mpsc_sdma_intr_mask(pi, 0xf);
	mpsc_sdma_intr_ack(pi);
}
/*
 ******************************************************************************
 *
 * Multi-Protocol Serial Controller Routines (MPSC)
 *
 ******************************************************************************
 */

static void mpsc_hw_init(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);

	/* Set up clock routing */
	if (pi->mirror_regs) {
		v = pi->shared_regs->MPSC_MRR_m;
		v &= ~0x1c7;
		pi->shared_regs->MPSC_MRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = pi->shared_regs->MPSC_RCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_RCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = pi->shared_regs->MPSC_TCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_TCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	} else {
		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
		v &= ~0x1c7;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	}

	/* Put MPSC in UART mode & enable Tx/Rx engines */
	writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);

	/* No preamble, 16x divider, low-latency */
	writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
	mpsc_set_baudrate(pi, pi->default_baud);

	if (pi->mirror_regs) {
		pi->MPSC_CHR_1_m = 0;
		pi->MPSC_CHR_2_m = 0;
	}
	writel(0, pi->mpsc_base + MPSC_CHR_1);
	writel(0, pi->mpsc_base + MPSC_CHR_2);
	writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
	writel(0, pi->mpsc_base + MPSC_CHR_4);
	writel(0, pi->mpsc_base + MPSC_CHR_5);
	writel(0, pi->mpsc_base + MPSC_CHR_6);
	writel(0, pi->mpsc_base + MPSC_CHR_7);
	writel(0, pi->mpsc_base + MPSC_CHR_8);
	writel(0, pi->mpsc_base + MPSC_CHR_9);
	writel(0, pi->mpsc_base + MPSC_CHR_10);
}
static void mpsc_enter_hunt(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
			udelay(100);
	}
}
static void mpsc_freeze(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v |= MPSC_MPCR_FRZ;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

static void mpsc_unfreeze(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v &= ~MPSC_MPCR_FRZ;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);

	pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
}
static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
{
	u32 v;

	pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line, len);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
{
	u32 v;

	pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
		pi->port.line, len);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);

	v = (v & ~(1 << 14)) | ((len & 0x1) << 14);

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
{
	u32 v;

	pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);

	v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
		readl(pi->mpsc_base + MPSC_CHR_2);

	v = (v & ~0xc000c) | (p << 18) | (p << 2);

	if (pi->mirror_regs)
		pi->MPSC_CHR_2_m = v;
	writel(v, pi->mpsc_base + MPSC_CHR_2);
}
/*
 ******************************************************************************
 *
 * Driver Init Routines
 *
 ******************************************************************************
 */

static void mpsc_init_hw(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);

	mpsc_brg_init(pi, pi->brg_clk_src);
	mpsc_brg_enable(pi);
	mpsc_sdma_init(pi, dma_get_cache_alignment());	/* burst a cacheline */
	mpsc_sdma_stop(pi);
	mpsc_hw_init(pi);
}

static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
{
	int rc = 0;

	pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
		pi->port.line);

	if (!pi->dma_region) {
		if (!dma_supported(pi->port.dev, 0xffffffff)) {
			printk(KERN_ERR "MPSC: Inadequate DMA support\n");
			rc = -ENXIO;
		} else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
						MPSC_DMA_ALLOC_SIZE,
						&pi->dma_region_p, GFP_KERNEL))
				== NULL) {
			printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}
static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);

	if (pi->dma_region) {
		dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
				pi->dma_region, pi->dma_region_p);
		pi->dma_region = NULL;
		pi->dma_region_p = (dma_addr_t)NULL;
	}
}
static void mpsc_init_rings(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct mpsc_tx_desc *txre;
	u32 dp, dp_p;
	u8 *bp, *bp_p;
	int i;

	pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);

	/*
	 * Descriptors & buffers are multiples of cacheline size and must be
	 * cacheline aligned.
	 */
	dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
	dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());

	/*
	 * Partition dma region into rx ring descriptor, rx buffers,
	 * tx ring descriptors, and tx buffers.
	 */
	pi->rxr = dp;
	pi->rxr_p = dp_p;
	dp += MPSC_RXR_SIZE;
	dp_p += MPSC_RXR_SIZE;

	pi->rxb = (u8 *)dp;
	pi->rxb_p = (u8 *)dp_p;
	dp += MPSC_RXB_SIZE;
	dp_p += MPSC_RXB_SIZE;

	pi->rxr_posn = 0;

	pi->txr = dp;
	pi->txr_p = dp_p;
	dp += MPSC_TXR_SIZE;
	dp_p += MPSC_TXR_SIZE;

	pi->txb = (u8 *)dp;
	pi->txb_p = (u8 *)dp_p;

	pi->txr_head = 0;
	pi->txr_tail = 0;

	/* Init rx ring descriptors */
	dp = pi->rxr;
	dp_p = pi->rxr_p;
	bp = pi->rxb;
	bp_p = pi->rxb_p;

	for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
		rxre = (struct mpsc_rx_desc *)dp;

		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
		rxre->bytecnt = cpu_to_be16(0);
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
		rxre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_RXRE_SIZE;
		dp_p += MPSC_RXRE_SIZE;
		bp += MPSC_RXBE_SIZE;
		bp_p += MPSC_RXBE_SIZE;
	}
	rxre->link = cpu_to_be32(pi->rxr_p);	/* Wrap last back to first */

	/* Init tx ring descriptors */
	dp = pi->txr;
	dp_p = pi->txr_p;
	bp = pi->txb;
	bp_p = pi->txb_p;

	for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
		txre = (struct mpsc_tx_desc *)dp;

		txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
		txre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_TXRE_SIZE;
		dp_p += MPSC_TXRE_SIZE;
		bp += MPSC_TXBE_SIZE;
		bp_p += MPSC_TXBE_SIZE;
	}
	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */

	dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
			MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)pi->dma_region,
				(ulong)pi->dma_region
				+ MPSC_DMA_ALLOC_SIZE);
#endif
}
static void mpsc_uninit_rings(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);
}

static int mpsc_make_ready(struct mpsc_port_info *pi)
{
	int rc;

	pr_debug("mpsc_make_ready[%d]: Making ctlr ready\n", pi->port.line);

	if (!pi->ready) {
		mpsc_init_hw(pi);
		if ((rc = mpsc_alloc_ring_mem(pi)))
			return rc;
		mpsc_init_rings(pi);
		pi->ready = 1;
	}

	return 0;
}

#ifdef CONFIG_CONSOLE_POLL
static int serial_polled;
#endif
/*
 ******************************************************************************
 *
 * Interrupt Handling Routines
 *
 ******************************************************************************
 */

static int mpsc_rx_intr(struct mpsc_port_info *pi, unsigned long *flags)
{
	struct mpsc_rx_desc *rxre;
	struct tty_port *port = &pi->port.state->port;
	u32 cmdstat, bytes_in, i;
	int rc = 0;
	u8 *bp;
	char flag = TTY_NORMAL;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));

	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		invalidate_dcache_range((ulong)rxre,
				(ulong)rxre + MPSC_RXRE_SIZE);
#endif

	/*
	 * Loop through Rx descriptors handling ones that have been completed.
	 */
	while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
				& SDMA_DESC_CMDSTAT_O)) {
		bytes_in = be16_to_cpu(rxre->bytecnt);
#ifdef CONFIG_CONSOLE_POLL
		if (unlikely(serial_polled)) {
			serial_polled = 0;
			return 0;
		}
#endif
		/* Following use of tty struct directly is deprecated */
		if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
			if (port->low_latency) {
				spin_unlock_irqrestore(&pi->port.lock, *flags);
				tty_flip_buffer_push(port);
				spin_lock_irqsave(&pi->port.lock, *flags);
			}
			/*
			 * If this failed then we will throw away the bytes
			 * but must do so to clear interrupts.
			 */
		}

		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif

		/*
		 * Other than for parity error, the manual provides little
		 * info on what data will be in a frame flagged by any of
		 * these errors.  For parity error, it is the last byte in
		 * the buffer that had the error.  As for the rest, I guess
		 * we'll assume there is no data in the buffer.
		 * If there is...it gets lost.
		 */
		if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR))) {

			pi->port.icount.rx++;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR) {	/* Break */
				pi->port.icount.brk++;

				if (uart_handle_break(&pi->port))
					goto next_frame;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
				pi->port.icount.frame++;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
				pi->port.icount.overrun++;
			}

			cmdstat &= pi->port.read_status_mask;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR)
				flag = TTY_BREAK;
			else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
				flag = TTY_FRAME;
			else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
				flag = TTY_OVERRUN;
			else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(&pi->port, *bp)) {
			bp++;
			bytes_in--;
#ifdef CONFIG_CONSOLE_POLL
			if (unlikely(serial_polled)) {
				serial_polled = 0;
				return 0;
			}
#endif
			goto next_frame;
		}

		if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR)))
				&& !(cmdstat & pi->port.ignore_status_mask)) {
			tty_insert_flip_char(port, *bp, flag);
		} else {
			for (i = 0; i < bytes_in; i++)
				tty_insert_flip_char(port, *bp++, TTY_NORMAL);

			pi->port.icount.rx += bytes_in;
		}

next_frame:
		rxre->bytecnt = cpu_to_be16(0);
		wmb();
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		wmb();
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif

		/* Advance to next descriptor */
		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
		rxre = (struct mpsc_rx_desc *)
			(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		rc = 1;
	}

	/* Restart rx engine, if it's stopped */
	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
		mpsc_start_rx(pi);

	spin_unlock_irqrestore(&pi->port.lock, *flags);
	tty_flip_buffer_push(port);
	spin_lock_irqsave(&pi->port.lock, *flags);
	return rc;
}
static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb();			/* ensure cmdstat is last field updated */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));

	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}
static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.state->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that its safe to write to
			 * CHR_1.  Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
						UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}
static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi);	/* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}

/*
 * This is the driver's interrupt handler.  To avoid a race, we first clear
 * the interrupt, then handle any completed Rx/Tx descriptors.  When done
 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
 */
static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
{
	struct mpsc_port_info *pi = dev_id;
	unsigned long iflags;
	int rc = IRQ_NONE;

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n", pi->port.line);

	spin_lock_irqsave(&pi->port.lock, iflags);
	mpsc_sdma_intr_ack(pi);
	if (mpsc_rx_intr(pi, &iflags))
		rc = IRQ_HANDLED;
	if (mpsc_tx_intr(pi))
		rc = IRQ_HANDLED;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
	return rc;
}
/*
 ******************************************************************************
 *
 * serial_core.c Interface routines
 *
 ******************************************************************************
 */

static uint mpsc_tx_empty(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	unsigned long iflags;
	uint rc;

	spin_lock_irqsave(&pi->port.lock, iflags);
	rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	return rc;
}

static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}

static uint mpsc_get_mctrl(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 mflags, status;

	status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
		: readl(pi->mpsc_base + MPSC_CHR_10);

	mflags = 0;
	if (status & 0x1)
		mflags |= TIOCM_CTS;
	if (status & 0x2)
		mflags |= TIOCM_CAR;

	return mflags | TIOCM_DSR;	/* No way to tell if DSR asserted */
}

static void mpsc_stop_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_tx[%d]\n", port->line);

	mpsc_freeze(pi);
}
static void mpsc_start_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	mpsc_unfreeze(pi);
	mpsc_copy_tx_data(pi);
	mpsc_sdma_start_tx(pi);

	spin_unlock_irqrestore(&pi->tx_lock, iflags);

	pr_debug("mpsc_start_tx[%d]\n", port->line);
}

static void mpsc_start_rx(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);

	if (pi->rcv_data) {
		mpsc_enter_hunt(pi);
		mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
	}
}
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(100);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}

static void mpsc_break_ctl(struct uart_port *port, int ctl)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	unsigned long flags;
	u32 v;

	v = ctl ? 0x00ff0000 : 0;

	spin_lock_irqsave(&pi->port.lock, flags);
	if (pi->mirror_regs)
		pi->MPSC_CHR_1_m = v;
	writel(v, pi->mpsc_base + MPSC_CHR_1);
	spin_unlock_irqrestore(&pi->port.lock, flags);
}
static int mpsc_startup(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 flag = 0;
	int rc;

	pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
		port->line, pi->port.irq);

	if ((rc = mpsc_make_ready(pi)) == 0) {
		/* Setup IRQ handler */
		mpsc_sdma_intr_ack(pi);

		/* If irq's are shared, need to set flag */
		if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
			flag = IRQF_SHARED;

		if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
				"mpsc-sdma", pi))
			printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
					pi->port.irq);

		mpsc_sdma_intr_unmask(pi, 0xf);
		mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
					+ (pi->rxr_posn * MPSC_RXRE_SIZE)));
	}

	return rc;
}

static void mpsc_shutdown(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);

	mpsc_sdma_stop(pi);
	free_irq(pi->port.irq, pi);
}
static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 baud;
	unsigned long flags;
	u32 chr_bits, stop_bits, par;

	pi->c_iflag = termios->c_iflag;
	pi->c_cflag = termios->c_cflag;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		chr_bits = MPSC_MPCR_CL_5;
		break;
	case CS6:
		chr_bits = MPSC_MPCR_CL_6;
		break;
	case CS7:
		chr_bits = MPSC_MPCR_CL_7;
		break;
	case CS8:
	default:
		chr_bits = MPSC_MPCR_CL_8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		stop_bits = MPSC_MPCR_SBL_2;
	else
		stop_bits = MPSC_MPCR_SBL_1;

	par = MPSC_CHR_2_PAR_EVEN;
	if (termios->c_cflag & PARENB)
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_ODD;

	if (termios->c_cflag & CMSPAR) {
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_MARK;
		else
			par = MPSC_CHR_2_PAR_SPACE;
	}

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);

	spin_lock_irqsave(&pi->port.lock, flags);

	uart_update_timeout(port, termios->c_cflag, baud);

	mpsc_set_char_length(pi, chr_bits);
	mpsc_set_stop_bit_length(pi, stop_bits);
	mpsc_set_parity(pi, par);
	mpsc_set_baudrate(pi, baud);

	/* Characters/events to read */
	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;

	if (termios->c_iflag & INPCK)
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;

	/* Characters/events to ignore */
	pi->port.ignore_status_mask = 0;

	if (termios->c_iflag & IGNPAR)
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & IGNBRK) {
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;

		if (termios->c_iflag & IGNPAR)
			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
	}

	if ((termios->c_cflag & CREAD)) {
		if (!pi->rcv_data) {
			pi->rcv_data = 1;
			mpsc_start_rx(pi);
		}
	} else if (pi->rcv_data) {
		mpsc_stop_rx(port);
		pi->rcv_data = 0;
	}

	spin_unlock_irqrestore(&pi->port.lock, flags);
}
static const char *mpsc_type(struct uart_port *port)
{
	pr_debug("mpsc_type[%d]: port type: %s\n", port->line, MPSC_DRIVER_NAME);
	return MPSC_DRIVER_NAME;
}

static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}

static void mpsc_release_port(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	if (pi->ready) {
		mpsc_uninit_rings(pi);
		mpsc_free_ring_mem(pi);
		pi->ready = 0;
	}
}

static void mpsc_config_port(struct uart_port *port, int flags)
{
}
static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	int rc = 0;

	pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
		rc = -EINVAL;
	else if (pi->port.irq != ser->irq)
		rc = -EINVAL;
	else if (ser->io_type != SERIAL_IO_MEM)
		rc = -EINVAL;
	else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
		rc = -EINVAL;
	else if ((void *)pi->port.mapbase != ser->iomem_base)
		rc = -EINVAL;
	else if (pi->port.iobase != ser->port)
		rc = -EINVAL;
	else if (ser->hub6 != 0)
		rc = -EINVAL;

	return rc;
}
#ifdef CONFIG_CONSOLE_POLL
/* Serial polling routines for writing and reading from the uart while
 * in an interrupt or debug context.
 */

static char poll_buf[2048];
static int poll_ptr;
static int poll_cnt;
static void mpsc_put_poll_char(struct uart_port *port, unsigned char c);
static int mpsc_get_poll_char(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	struct mpsc_rx_desc *rxre;
	u32 cmdstat, bytes_in, i;
	u8 *bp;

	if (!serial_polled)
		serial_polled = 1;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}
	poll_ptr = 0;
	poll_cnt = 0;

	while (poll_cnt == 0) {
		rxre = (struct mpsc_rx_desc *)(pi->rxr +
				(pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre,
				MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		/*
		 * Loop through Rx descriptors handling ones that have
		 * been completed.
		 */
		while (poll_cnt == 0 &&
				!((cmdstat = be32_to_cpu(rxre->cmdstat)) &
					SDMA_DESC_CMDSTAT_O)) {
			bytes_in = be16_to_cpu(rxre->bytecnt);
			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
			dma_cache_sync(pi->port.dev, (void *)bp,
					MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)bp,
						(ulong)bp + MPSC_RXBE_SIZE);
#endif
			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
					SDMA_DESC_CMDSTAT_FR |
					SDMA_DESC_CMDSTAT_OR))) &&
					!(cmdstat & pi->port.ignore_status_mask)) {
				poll_buf[poll_cnt] = *bp;
				poll_cnt++;
			} else {
				for (i = 0; i < bytes_in; i++) {
					poll_buf[poll_cnt] = *bp++;
					poll_cnt++;
				}
				pi->port.icount.rx += bytes_in;
			}

			rxre->bytecnt = cpu_to_be16(0);
			wmb();
			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
					SDMA_DESC_CMDSTAT_EI |
					SDMA_DESC_CMDSTAT_F |
					SDMA_DESC_CMDSTAT_L);
			wmb();
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				flush_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif

			/* Advance to next descriptor */
			pi->rxr_posn = (pi->rxr_posn + 1) &
				(MPSC_RXR_ENTRIES - 1);
			rxre = (struct mpsc_rx_desc *)(pi->rxr +
					(pi->rxr_posn * MPSC_RXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		}

		/* Restart rx engine, if it's stopped */
		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
			mpsc_start_rx(pi);
	}
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}

	return 0;
}
static void mpsc_put_poll_char(struct uart_port *port, unsigned char c)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 data;

	data = readl(pi->mpsc_base + MPSC_MPCR);
	writeb(c, pi->mpsc_base + MPSC_CHR_1);
	mb();
	data = readl(pi->mpsc_base + MPSC_CHR_2);
	data |= MPSC_CHR_2_TTCS;
	writel(data, pi->mpsc_base + MPSC_CHR_2);
	mb();

	while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
}
#endif
static struct uart_ops mpsc_pops = {
	.tx_empty	= mpsc_tx_empty,
	.set_mctrl	= mpsc_set_mctrl,
	.get_mctrl	= mpsc_get_mctrl,
	.stop_tx	= mpsc_stop_tx,
	.start_tx	= mpsc_start_tx,
	.stop_rx	= mpsc_stop_rx,
	.break_ctl	= mpsc_break_ctl,
	.startup	= mpsc_startup,
	.shutdown	= mpsc_shutdown,
	.set_termios	= mpsc_set_termios,
	.type		= mpsc_type,
	.release_port	= mpsc_release_port,
	.request_port	= mpsc_request_port,
	.config_port	= mpsc_config_port,
	.verify_port	= mpsc_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= mpsc_get_poll_char,
	.poll_put_char	= mpsc_put_poll_char,
#endif
};
/*
 ******************************************************************************
 *
 * Console Interface Routines
 *
 ******************************************************************************
 */

#ifdef CONFIG_SERIAL_MPSC_CONSOLE
static void mpsc_console_write(struct console *co, const char *s, uint count)
{
	struct mpsc_port_info *pi = &mpsc_ports[co->index];
	u8 *bp, *dp, add_cr = 0;
	int i;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	while (pi->txr_head != pi->txr_tail) {
		while (mpsc_sdma_tx_active(pi))
			udelay(100);
		mpsc_sdma_intr_ack(pi);
		mpsc_tx_intr(pi);
	}

	while (mpsc_sdma_tx_active(pi))
		udelay(100);

	while (count > 0) {
		bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);

		for (i = 0; i < MPSC_TXBE_SIZE; i++) {
			if (count == 0)
				break;

			if (add_cr) {
				*(dp++) = '\r';
				add_cr = 0;
			} else {
				*(dp++) = *s;

				if (*(s++) == '\n') { /* add '\r' after '\n' */
					add_cr = 1;
					count++;
				}
			}

			count--;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 0);
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
		mpsc_sdma_start_tx(pi);

		while (mpsc_sdma_tx_active(pi))
			udelay(100);

		pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
}
static int __init mpsc_console_setup(struct console *co, char *options)
{
	struct mpsc_port_info *pi;
	int baud, bits, parity, flow;

	pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);

	if (co->index >= MPSC_NUM_CTLRS)
		co->index = 0;

	pi = &mpsc_ports[co->index];

	baud = pi->default_baud;
	bits = pi->default_bits;
	parity = pi->default_parity;
	flow = pi->default_flow;

	spin_lock_init(&pi->port.lock);	/* Temporary fix--copied from 8250.c */

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&pi->port, co, baud, parity, bits, flow);
}

static struct console mpsc_console = {
	.name		= MPSC_DEV_NAME,
	.write		= mpsc_console_write,
	.device		= uart_console_device,
	.setup		= mpsc_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &mpsc_reg,
};

static int __init mpsc_late_console_init(void)
{
	pr_debug("mpsc_late_console_init: Enter\n");

	if (!(mpsc_console.flags & CON_ENABLED))
		register_console(&mpsc_console);

	return 0;
}

late_initcall(mpsc_late_console_init);

#define MPSC_CONSOLE	&mpsc_console
#else
#define MPSC_CONSOLE	NULL
#endif
/*
 ******************************************************************************
 *
 * Dummy Platform Driver to extract & map shared register regions
 *
 ******************************************************************************
 */

static void mpsc_resource_err(char *s)
{
	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
}

static int mpsc_shared_map_regs(struct platform_device *pd)
{
	struct resource *r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_ROUTING_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE,
				"mpsc_routing_regs")) {
		mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_shared_regs.mpsc_routing_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC routing base");
		return -ENOMEM;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_INTR_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE,
				"sdma_intr_regs")) {
		mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
		mpsc_shared_regs.sdma_intr_base_p = r->start;
	} else {
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_resource_err("SDMA intr base");
		return -ENOMEM;
	}

	return 0;
}
static void mpsc_shared_unmap_regs(void)
{
	if (mpsc_shared_regs.mpsc_routing_base) {
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
	}
	if (mpsc_shared_regs.sdma_intr_base) {
		iounmap(mpsc_shared_regs.sdma_intr_base);
		release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
	}

	mpsc_shared_regs.mpsc_routing_base = NULL;
	mpsc_shared_regs.sdma_intr_base = NULL;

	mpsc_shared_regs.mpsc_routing_base_p = 0;
	mpsc_shared_regs.sdma_intr_base_p = 0;
}
static int mpsc_shared_drv_probe(struct platform_device *dev)
{
	struct mpsc_shared_pdata *pdata;
	int rc = -ENODEV;

	if (dev->id == 0) {
		if (!(rc = mpsc_shared_map_regs(dev))) {
			pdata = (struct mpsc_shared_pdata *)
				dev_get_platdata(&dev->dev);

			mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
			mpsc_shared_regs.MPSC_RCRR_m = pdata->rcrr_val;
			mpsc_shared_regs.MPSC_TCRR_m = pdata->tcrr_val;
			mpsc_shared_regs.SDMA_INTR_CAUSE_m =
				pdata->intr_cause_val;
			mpsc_shared_regs.SDMA_INTR_MASK_m =
				pdata->intr_mask_val;

			rc = 0;
		}
	}

	return rc;
}

static int mpsc_shared_drv_remove(struct platform_device *dev)
{
	int rc = -ENODEV;

	if (dev->id == 0) {
		mpsc_shared_unmap_regs();
		mpsc_shared_regs.MPSC_MRR_m = 0;
		mpsc_shared_regs.MPSC_RCRR_m = 0;
		mpsc_shared_regs.MPSC_TCRR_m = 0;
		mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
		mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
		rc = 0;
	}

	return rc;
}

static struct platform_driver mpsc_shared_driver = {
	.probe	= mpsc_shared_drv_probe,
	.remove	= mpsc_shared_drv_remove,
	.driver	= {
		.name	= MPSC_SHARED_NAME,
	},
};
/*
 ******************************************************************************
 *
 * Driver Interface Routines
 *
 ******************************************************************************
 */

static struct uart_driver mpsc_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= MPSC_DRIVER_NAME,
	.dev_name	= MPSC_DEV_NAME,
	.major		= MPSC_MAJOR,
	.minor		= MPSC_MINOR_START,
	.nr		= MPSC_NUM_CTLRS,
	.cons		= MPSC_CONSOLE,
};
static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
		struct platform_device *pd)
{
	struct resource *r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
			&& request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
				"mpsc_regs")) {
		pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
		pi->mpsc_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC base");
		return -ENOMEM;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
		pi->sdma_base = ioremap(r->start, MPSC_SDMA_REG_BLOCK_SIZE);
		pi->sdma_base_p = r->start;
	} else {
		mpsc_resource_err("SDMA base");
		if (pi->mpsc_base) {
			iounmap(pi->mpsc_base);
			pi->mpsc_base = NULL;
		}
		return -ENOMEM;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BRG_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
		pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
		pi->brg_base_p = r->start;
	} else {
		mpsc_resource_err("BRG base");
		if (pi->mpsc_base) {
			iounmap(pi->mpsc_base);
			pi->mpsc_base = NULL;
		}
		if (pi->sdma_base) {
			iounmap(pi->sdma_base);
			pi->sdma_base = NULL;
		}
		return -ENOMEM;
	}

	return 0;
}
static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
{
	if (pi->mpsc_base) {
		iounmap(pi->mpsc_base);
		release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
	}
	if (pi->sdma_base) {
		iounmap(pi->sdma_base);
		release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
	}
	if (pi->brg_base) {
		iounmap(pi->brg_base);
		release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
	}

	pi->mpsc_base = NULL;
	pi->sdma_base = NULL;
	pi->brg_base = NULL;

	pi->mpsc_base_p = 0;
	pi->sdma_base_p = 0;
	pi->brg_base_p = 0;
}
static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
		struct platform_device *pd, int num)
{
	struct mpsc_pdata *pdata;

	pdata = dev_get_platdata(&pd->dev);

	pi->port.uartclk = pdata->brg_clk_freq;
	pi->port.iotype = UPIO_MEM;
	pi->port.line = num;
	pi->port.type = PORT_MPSC;
	pi->port.fifosize = MPSC_TXBE_SIZE;
	pi->port.membase = pi->mpsc_base;
	pi->port.mapbase = (ulong)pi->mpsc_base;
	pi->port.ops = &mpsc_pops;

	pi->mirror_regs = pdata->mirror_regs;
	pi->cache_mgmt = pdata->cache_mgmt;
	pi->brg_can_tune = pdata->brg_can_tune;
	pi->brg_clk_src = pdata->brg_clk_src;
	pi->mpsc_max_idle = pdata->max_idle;
	pi->default_baud = pdata->default_baud;
	pi->default_bits = pdata->default_bits;
	pi->default_parity = pdata->default_parity;
	pi->default_flow = pdata->default_flow;

	/* Initial values of mirrored regs */
	pi->MPSC_CHR_1_m = pdata->chr_1_val;
	pi->MPSC_CHR_2_m = pdata->chr_2_val;
	pi->MPSC_CHR_10_m = pdata->chr_10_val;
	pi->MPSC_MPCR_m = pdata->mpcr_val;
	pi->BRG_BCR_m = pdata->bcr_val;

	pi->shared_regs = &mpsc_shared_regs;

	pi->port.irq = platform_get_irq(pd, 0);
}
static int mpsc_drv_probe(struct platform_device *dev)
{
	struct mpsc_port_info *pi;
	int rc = -ENODEV;

	pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);

	if (dev->id < MPSC_NUM_CTLRS) {
		pi = &mpsc_ports[dev->id];

		if (!(rc = mpsc_drv_map_regs(pi, dev))) {
			mpsc_drv_get_platform_data(pi, dev, dev->id);
			pi->port.dev = &dev->dev;

			if (!(rc = mpsc_make_ready(pi))) {
				spin_lock_init(&pi->tx_lock);
				if (!(rc = uart_add_one_port(&mpsc_reg,
								&pi->port))) {
					rc = 0;
				} else {
					mpsc_release_port((struct uart_port *)
							&pi->port);
					mpsc_drv_unmap_regs(pi);
				}
			} else {
				mpsc_drv_unmap_regs(pi);
			}
		}
	}

	return rc;
}

static int mpsc_drv_remove(struct platform_device *dev)
{
	pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id);

	if (dev->id < MPSC_NUM_CTLRS) {
		uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port);
		mpsc_release_port((struct uart_port *)
				&mpsc_ports[dev->id].port);
		mpsc_drv_unmap_regs(&mpsc_ports[dev->id]);
		return 0;
	} else {
		return -ENODEV;
	}
}
static struct platform_driver mpsc_driver = {
	.probe	= mpsc_drv_probe,
	.remove	= mpsc_drv_remove,
	.driver	= {
		.name	= MPSC_CTLR_NAME,
	},
};

static int __init mpsc_drv_init(void)
{
	int rc;

	printk(KERN_INFO "Serial: MPSC driver\n");

	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));

	if (!(rc = uart_register_driver(&mpsc_reg))) {
		if (!(rc = platform_driver_register(&mpsc_shared_driver))) {
			if ((rc = platform_driver_register(&mpsc_driver))) {
				platform_driver_unregister(&mpsc_shared_driver);
				uart_unregister_driver(&mpsc_reg);
			}
		} else {
			uart_unregister_driver(&mpsc_reg);
		}
	}

	return rc;
}

static void __exit mpsc_drv_exit(void)
{
	platform_driver_unregister(&mpsc_driver);
	platform_driver_unregister(&mpsc_shared_driver);
	uart_unregister_driver(&mpsc_reg);
	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
}

module_init(mpsc_drv_init);
module_exit(mpsc_drv_exit);

MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
MODULE_VERSION(MPSC_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);
MODULE_ALIAS("platform:" MPSC_CTLR_NAME);