/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_mtd.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>

#define DRV_NAME "lpc32xx_mlc"
/**********************************************************************
* MLC NAND controller register offsets
**********************************************************************/

#define MLC_BUFF(x)			(x + 0x00000)
#define MLC_DATA(x)			(x + 0x08000)
#define MLC_CMD(x)			(x + 0x10000)
#define MLC_ADDR(x)			(x + 0x10004)
#define MLC_ECC_ENC_REG(x)		(x + 0x10008)
#define MLC_ECC_DEC_REG(x)		(x + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)		(x + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)		(x + 0x10014)
#define MLC_RPR(x)			(x + 0x10018)
#define MLC_WPR(x)			(x + 0x1001C)
#define MLC_RUBP(x)			(x + 0x10020)
#define MLC_ROBP(x)			(x + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)		(x + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)		(x + 0x1002C)
#define MLC_ICR(x)			(x + 0x10030)
#define MLC_TIME_REG(x)			(x + 0x10034)
#define MLC_IRQ_MR(x)			(x + 0x10038)
#define MLC_IRQ_SR(x)			(x + 0x1003C)
#define MLC_LOCK_PR(x)			(x + 0x10044)
#define MLC_ISR(x)			(x + 0x10048)
#define MLC_CEH(x)			(x + 0x1004C)
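
/*
 * All offsets are relative to the remapped controller base address "x".
 * MLC_BUFF and MLC_DATA are data access windows rather than single 32-bit
 * registers, which is presumably why the control registers proper only
 * start at offset 0x10000.
 */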
/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET			0xFF

/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT			(1 << 3)
#define MLCICR_LARGEBLOCK		(1 << 2)
#define MLCICR_LONGADDR			(1 << 1)
#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)
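
/*
 * Each MLC_TIME_REG field holds a delay in MLC clock cycles; values wider
 * than a field (e.g. more than 3 cycles for TCEA_DELAY or 31 for BUSY_DELAY)
 * are silently masked off by the macros above, not clamped.
 */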
/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY		(1 << 5)
#define MLCIRQ_CONTROLLER_READY		(1 << 4)
#define MLCIRQ_DECODE_FAILURE		(1 << 3)
#define MLCIRQ_DECODE_ERROR		(1 << 2)
#define MLCIRQ_ECC_READY		(1 << 1)
#define MLCIRQ_WRPROT_FAULT		(1 << 0)

/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC			0xA25E

/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE		(1 << 6)
#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED		(1 << 3)
#define MLCISR_ECC_READY		(1 << 2)
#define MLCISR_CONTROLLER_READY		(1 << 1)
#define MLCISR_NAND_READY		(1 << 0)

/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL			(1 << 0)
struct lpc32xx_nand_cfg_mlc {
	uint32_t tcea_delay;
	uint32_t busy_delay;
	uint32_t nand_ta;
	uint32_t rd_high;
	uint32_t rd_low;
	uint32_t wr_high;
	uint32_t wr_low;
	int wp_gpio;
	struct mtd_partition *parts;
	unsigned num_parts;
};
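
/*
 * OOB layout: for every 512-byte subpage the controller uses the upper 10
 * bytes of the corresponding 16-byte OOB chunk for ECC, leaving the first
 * 6 bytes of each chunk as free OOB space.
 */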
static struct nand_ecclayout lpc32xx_nand_oob = {
	.eccbytes = 40,
	.eccpos = {  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
		    22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
		    38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
		    54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
	.oobfree = {
		{ .offset = 0,  .length = 6, },
		{ .offset = 16, .length = 6, },
		{ .offset = 32, .length = 6, },
		{ .offset = 48, .length = 6, },
	},
};
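
/*
 * The BBT descriptors below use fixed absolute page numbers: 524224 (main)
 * and 524160 (mirror), i.e. the last blocks of a device with 524288 pages
 * of 2048 bytes (1 GiB). That sizing is an inference from the numbers, not
 * documented behaviour.
 */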
static struct nand_bbt_descr lpc32xx_nand_bbt = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};
struct lpc32xx_nand_host {
	struct nand_chip	nand_chip;
	struct lpc32xx_mlc_platform_data *pdata;
	struct clk		*clk;
	struct mtd_info		mtd;
	void __iomem		*io_base;
	int			irq;
	struct lpc32xx_nand_cfg_mlc	*ncfg;
	struct completion	comp_nand;
	struct completion	comp_controller;
	/*
	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	dma_addr_t		oob_buf_phy;
	/*
	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	uint8_t			*oob_buf;
	/* Physical address of DMA base address */
	dma_addr_t		io_base_phy;

	struct completion	comp_dma;
	struct dma_chan		*dma_chan;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;
	uint8_t			*dma_buf;
	uint8_t			*dummy_buf;
	int			mlcsubpages; /* number of 512bytes-subpages */
};
/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512 byte subpages
 * instead of doing readl() / writel() in a loop slows it down significantly.
 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * This applies to the transfer itself. In the DMA case: only the
 * wait_for_completion() (DMA setup _not_ included).
 *
 * Note that the 512 bytes subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
 * 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND
 * controller transferring data between its internal buffer to/from the NAND
 * chip.)
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 */
static int use_dma;
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset MLC controller */
	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
	udelay(1000);

	/* Get base clock for MLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = 104000000;

	/* Unlock MLC_ICR
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Configure MLC Controller: Large Block, 5 Byte Address */
	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
	writel(tmp, MLC_ICR(host->io_base));

	/* Unlock MLC_TIME_REG
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Compute clock setup values, see LPC and NAND manual */
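	/*
	 * The ncfg->* values are taken to be reciprocal delays (specified as
	 * frequencies, in Hz, in the device tree), so clkrate / value yields
	 * a delay in MLC clock cycles, with "+ 1" rounding up. This
	 * interpretation is an assumption based on typical bindings, not a
	 * statement from the LPC32x0 manual.
	 */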
	tmp = 0;
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
	writel(tmp, MLC_TIME_REG(host->io_base));

	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
	       MLC_IRQ_MR(host->io_base));

	/* Normal nCE operation: nCE controlled by controller */
	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}
/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
				  unsigned int ctrl)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writel(cmd, MLC_CMD(host->io_base));
		else
			writel(cmd, MLC_ADDR(host->io_base));
	}
}
/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;

	if ((readb(MLC_ISR(host->io_base)) &
	     (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
	    (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
		return 1;

	return 0;
}
static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
{
	uint8_t sr;

	/* Clear interrupt flag by reading status */
	sr = readb(MLC_IRQ_SR(host->io_base));
	if (sr & MLCIRQ_NAND_READY)
		complete(&host->comp_nand);
	if (sr & MLCIRQ_CONTROLLER_READY)
		complete(&host->comp_controller);

	return IRQ_HANDLED;
}
static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = chip->priv;

	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
		goto exit;

	wait_for_completion(&host->comp_nand);

	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
		/* Seems to be delayed sometimes by controller */
		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}
static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
				       struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = chip->priv;

	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
		goto exit;

	wait_for_completion(&host->comp_controller);

	while (!(readb(MLC_ISR(host->io_base)) &
		 MLCISR_CONTROLLER_READY)) {
		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}
static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	lpc32xx_waitfunc_nand(mtd, chip);
	lpc32xx_waitfunc_controller(mtd, chip);

	return NAND_STATUS_READY;
}
/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}
static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
			    enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp_dma);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp_dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}
static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	struct lpc32xx_nand_host *host = chip->priv;
	int i, j;
	uint8_t *oobbuf = chip->oob_poi;
	uint32_t mlc_isr;
	int res;
	uint8_t *dma_buf;
	bool dma_mapped;
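
	/*
	 * Direct lowmem buffers can be handed to the DMA engine as-is;
	 * anything else (e.g. highmem or vmalloc'ed buffers) is bounced
	 * through the host->dma_buf scratch buffer instead.
	 */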
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->dma_buf;
		dma_mapped = false;
	}

	/* Writing Command and Address */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	/* For all sub-pages */
	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Auto Decode Command */
		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);

		/* Check ECC Error status */
		mlc_isr = readl(MLC_ISR(host->io_base));
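		/*
		 * Per the MLCISR_* definitions above: bit 6 flags an
		 * uncorrectable decode failure, bit 3 flags that correctable
		 * errors were detected, and bits 5:4 hold the number of
		 * corrected symbols minus one.
		 */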
		if (mlc_isr & MLCISR_DECODER_FAILURE) {
			mtd->ecc_stats.failed++;
			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
		}

		/* Read 512 + 16 Bytes */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_DEV_TO_MEM);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				*((uint32_t *)(buf)) =
					readl(MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		for (j = 0; j < (16 >> 2); j++) {
			*((uint32_t *)(oobbuf)) =
				readl(MLC_BUFF(host->io_base));
			oobbuf += 4;
		}
	}

	if (use_dma && !dma_mapped)
		memcpy(buf, dma_buf, mtd->writesize);

	return 0;
}
static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
				       struct nand_chip *chip,
				       const uint8_t *buf, int oob_required)
{
	struct lpc32xx_nand_host *host = chip->priv;
	const uint8_t *oobbuf = chip->oob_poi;
	uint8_t *dma_buf = (uint8_t *)buf;
	int res;
	int i, j;

	if (use_dma && (void *)buf >= high_memory) {
		dma_buf = host->dma_buf;
		memcpy(dma_buf, buf, mtd->writesize);
	}

	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Encode */
		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

		/* Write 512 + 6 Bytes to Buffer */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_MEM_TO_DEV);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				writel(*((uint32_t *)(buf)),
				       MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 4;
		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 12;

		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);
	}
	return 0;
}
static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			      uint32_t offset, int data_len, const uint8_t *buf,
			      int oob_required, int page, int cached, int raw)
{
	int res;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
	res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	lpc32xx_waitfunc(mtd, chip);

	return res;
}
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct lpc32xx_nand_host *host = chip->priv;

	/* Read whole page - necessary with MLC controller! */
	lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);

	return 0;
}
static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			     int page)
{
	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
	return 0;
}

/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
	/* Always enabled! */
}
static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = &host->mtd;
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-mlc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	/*
	 * Set direction to a sensible value even if the dmaengine driver
	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
	 * driver criticizes it as "alien transfer direction".
	 */
	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 128;
	host->dma_slave_config.dst_maxburst = 128;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		goto out1;
	}

	return 0;
out1:
	dma_release_channel(host->dma_chan);
	return -ENXIO;
}
static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
	struct lpc32xx_nand_cfg_mlc *ncfg;
	struct device_node *np = dev->of_node;

	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
	if (!ncfg) {
		dev_err(dev, "could not allocate memory for platform data\n");
		return NULL;
	}

	of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
	of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
	of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
	of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
	of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
	of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
	of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);

	if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
	    !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
	    !ncfg->wr_low) {
		dev_err(dev, "chip parameters not specified correctly\n");
		return NULL;
	}

	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

	return ncfg;
}
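
/*
 * Illustrative device tree fragment matching the properties parsed above;
 * the address, timing values and GPIO specifier are examples from typical
 * LPC3220 boards and are not authoritative. The "gpios" property is the
 * write-protect line consumed by lpc32xx_parse_dt().
 *
 *	nand@200a8000 {
 *		compatible = "nxp,lpc3220-mlc";
 *		reg = <0x200a8000 0x11000>;
 *		nxp,tcea-delay = <333333333>;
 *		nxp,busy-delay = <10000000>;
 *		nxp,nand-ta = <18181818>;
 *		nxp,rd-high = <31250000>;
 *		nxp,rd-low = <45454545>;
 *		nxp,wr-high = <40000000>;
 *		nxp,wr-low = <83333333>;
 *		gpios = <&gpio 5 19 1>;
 *	};
 */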
/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *rc;
	int res;
	struct mtd_part_parser_data ppdata = {};

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate device structure.\n");
		return -ENOMEM;
	}

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	host->io_base_phy = rc->start;

	mtd = &host->mtd;
	nand_chip = &host->nand_chip;
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
	    gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = pdev->dev.platform_data;

	nand_chip->priv = host;		/* link the private data structures */
	mtd->priv = nand_chip;
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock initialization failure\n");
		res = -ENOENT;
		goto err_exit1;
	}
	clk_enable(host->clk);

	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->dev_ready = lpc32xx_nand_device_ready;
	nand_chip->chip_delay = 25; /* us */
	nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
	nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* Initialize function pointers */
	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
	nand_chip->ecc.read_page = lpc32xx_read_page;
	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_oob = lpc32xx_write_oob;
	nand_chip->ecc.read_oob = lpc32xx_read_oob;
	nand_chip->ecc.strength = 4;
	nand_chip->write_page = lpc32xx_write_page;
	nand_chip->waitfunc = lpc32xx_waitfunc;

	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	nand_chip->bbt_td = &lpc32xx_nand_bbt;
	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

	/* bitflip_threshold's default is defined as ecc_strength anyway.
	 * Unfortunately, it is set only later at add_mtd_device(). Meanwhile
	 * being 0, it causes bad block table scanning errors in
	 * nand_scan_tail(), so preparing it here. */
	mtd->bitflip_threshold = nand_chip->ecc.strength;

	if (use_dma) {
		res = lpc32xx_dma_setup(host);
		if (res) {
			res = -EIO;
			goto err_exit2;
		}
	}

	/*
	 * Scan to find existence of the device and
	 * get the type of NAND device: SMALL block or LARGE block.
	 */
	if (nand_scan_ident(mtd, 1, NULL)) {
		res = -ENXIO;
		goto err_exit3;
	}

	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dma_buf) {
		dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
		res = -ENOMEM;
		goto err_exit3;
	}

	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dummy_buf) {
		dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
		res = -ENOMEM;
		goto err_exit3;
	}

	nand_chip->ecc.mode = NAND_ECC_HW;
	nand_chip->ecc.size = mtd->writesize;
	nand_chip->ecc.layout = &lpc32xx_nand_oob;
	host->mlcsubpages = mtd->writesize / 512;

	/* initially clear interrupt status */
	readb(MLC_IRQ_SR(host->io_base));

	init_completion(&host->comp_nand);
	init_completion(&host->comp_controller);

	host->irq = platform_get_irq(pdev, 0);
	if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
		dev_err(&pdev->dev, "failed to get platform irq\n");
		res = -EINVAL;
		goto err_exit3;
	}

	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
		res = -ENXIO;
		goto err_exit3;
	}

	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 * and scans for a bad block table if appropriate.
	 */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_exit4;
	}

	mtd->name = DRV_NAME;

	ppdata.of_node = pdev->dev.of_node;
	res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
					host->ncfg->num_parts);
	if (!res)
		return res;

	nand_release(mtd);

err_exit4:
	free_irq(host->irq, host);
err_exit3:
	if (use_dma)
		dma_release_channel(host->dma_chan);
err_exit2:
	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);
err_exit1:
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return res;
}
/*
 * Remove NAND device
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &host->mtd;

	nand_release(mtd);
	free_irq(host->irq, host);
	if (use_dma)
		dma_release_channel(host->dma_chan);

	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);

	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return 0;
}
#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Re-enable NAND clock */
	clk_enable(host->clk);

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Power down the clock */
	clk_disable(host->clk);

	return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif
static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-mlc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(lpc32xx_nand_match),
	},
};

module_platform_driver(lpc32xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");