/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_mtd.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>

#define DRV_NAME "lpc32xx_mlc"

/**********************************************************************
* MLC NAND controller register offsets
**********************************************************************/

#define MLC_BUFF(x)			(x + 0x00000)
#define MLC_DATA(x)			(x + 0x08000)
#define MLC_CMD(x)			(x + 0x10000)
#define MLC_ADDR(x)			(x + 0x10004)
#define MLC_ECC_ENC_REG(x)		(x + 0x10008)
#define MLC_ECC_DEC_REG(x)		(x + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)		(x + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)		(x + 0x10014)
#define MLC_RPR(x)			(x + 0x10018)
#define MLC_WPR(x)			(x + 0x1001C)
#define MLC_RUBP(x)			(x + 0x10020)
#define MLC_ROBP(x)			(x + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)		(x + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)		(x + 0x1002C)
#define MLC_ICR(x)			(x + 0x10030)
#define MLC_TIME_REG(x)			(x + 0x10034)
#define MLC_IRQ_MR(x)			(x + 0x10038)
#define MLC_IRQ_SR(x)			(x + 0x1003C)
#define MLC_LOCK_PR(x)			(x + 0x10044)
#define MLC_ISR(x)			(x + 0x10048)
#define MLC_CEH(x)			(x + 0x1004C)

/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET			0xFF

/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT			(1 << 3)
#define MLCICR_LARGEBLOCK		(1 << 2)
#define MLCICR_LONGADDR			(1 << 1)
#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)

/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY		(1 << 5)
#define MLCIRQ_CONTROLLER_READY		(1 << 4)
#define MLCIRQ_DECODE_FAILURE		(1 << 3)
#define MLCIRQ_DECODE_ERROR		(1 << 2)
#define MLCIRQ_ECC_READY		(1 << 1)
#define MLCIRQ_WRPROT_FAULT		(1 << 0)

/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC			0xA25E

/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE		(1 << 6)
#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED		(1 << 3)
#define MLCISR_ECC_READY		(1 << 2)
#define MLCISR_CONTROLLER_READY		(1 << 1)
#define MLCISR_NAND_READY		(1 << 0)

/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL			(1 << 0)

struct lpc32xx_nand_cfg_mlc {
	uint32_t tcea_delay;
	uint32_t busy_delay;
	uint32_t nand_ta;
	uint32_t rd_high;
	uint32_t rd_low;
	uint32_t wr_high;
	uint32_t wr_low;
	int wp_gpio;
	struct mtd_partition *parts;
	unsigned num_parts;
};

static struct nand_ecclayout lpc32xx_nand_oob = {
	.eccbytes = 40,
	.eccpos = { 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
		   22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
		   38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
		   54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
	.oobfree = {
		{ .offset = 0,
		  .length = 6, },
		{ .offset = 16,
		  .length = 6, },
		{ .offset = 32,
		  .length = 6, },
		{ .offset = 48,
		  .length = 6, },
	},
};

static struct nand_bbt_descr lpc32xx_nand_bbt = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};

struct lpc32xx_nand_host {
	struct nand_chip	nand_chip;
	struct lpc32xx_mlc_platform_data *pdata;
	struct clk		*clk;
	void __iomem		*io_base;
	int			irq;
	struct lpc32xx_nand_cfg_mlc *ncfg;
	struct completion	comp_nand;
	struct completion	comp_controller;
	/*
	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	dma_addr_t		oob_buf_phy;
	/*
	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	uint8_t			*oob_buf;
	/* Physical address of DMA base address */
	dma_addr_t		io_base_phy;

	struct completion	comp_dma;
	struct dma_chan		*dma_chan;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;
	uint8_t			*dma_buf;
	uint8_t			*dummy_buf;
	int			mlcsubpages; /* number of 512bytes-subpages */
};

/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512 byte subpages
 * instead of doing readl() / writel() in a loop slows it down significantly.
 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * This applies to the transfer itself. In the DMA case, only the
 * wait_for_completion() is measured (DMA setup _not_ included).
 *
 * Note that the 512 bytes subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
 * 2048 bytes page) is spent waiting for the NAND IRQ anyway. (The NAND
 * controller is transferring data between its internal buffer and the NAND
 * chip.)
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 */
static int use_dma;
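
/*
 * One-time controller setup: reset the MLC, program block/address mode and
 * the timing register from the DT-provided parameters, unmask the ready
 * interrupts and select normal nCE handling.
 */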
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset MLC controller */
	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
	udelay(1000);

	/* Get base clock for MLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = 104000000;

	/* Unlock MLC_ICR
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Configure MLC Controller: Large Block, 5 Byte Address */
	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
	writel(tmp, MLC_ICR(host->io_base));

	/* Unlock MLC_TIME_REG
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Compute clock setup values, see LPC and NAND manual */
	tmp = 0;
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
	writel(tmp, MLC_TIME_REG(host->io_base));

	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
	       MLC_IRQ_MR(host->io_base));

	/* Normal nCE operation: nCE controlled by controller */
	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}

/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
				  unsigned int ctrl)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writel(cmd, MLC_CMD(host->io_base));
		else
			writel(cmd, MLC_ADDR(host->io_base));
	}
}

/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

	if ((readb(MLC_ISR(host->io_base)) &
	     (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
	    (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
		return 1;

	return 0;
}
static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
{
	uint8_t sr;

	/* Clear interrupt flag by reading status */
	sr = readb(MLC_IRQ_SR(host->io_base));
	if (sr & MLCIRQ_NAND_READY)
		complete(&host->comp_nand);
	if (sr & MLCIRQ_CONTROLLER_READY)
		complete(&host->comp_controller);

	return IRQ_HANDLED;
}
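
/*
 * Wait for the NAND chip to become ready: first on the completion signalled
 * by the IRQ handler, then by polling MLC_ISR as a safety net.
 */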
static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
		goto exit;

	wait_for_completion(&host->comp_nand);

	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
		/* Seems to be delayed sometimes by controller */
		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
				       struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
		goto exit;

	wait_for_completion(&host->comp_controller);

	while (!(readb(MLC_ISR(host->io_base)) &
		 MLCISR_CONTROLLER_READY)) {
		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}
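
/* Wait until both the NAND chip and the MLC controller are ready. */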
static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	lpc32xx_waitfunc_nand(mtd, chip);
	lpc32xx_waitfunc_controller(mtd, chip);

	return NAND_STATUS_READY;
}

/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}
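
/* dmaengine completion callback: wake up the thread waiting in
 * lpc32xx_xmit_dma(). */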
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}
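
/*
 * Transfer one 512 byte subpage between memory and the MLC data buffer via
 * dmaengine; blocks until the DMA completion fires or a 1 s timeout expires.
 */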
static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
			    enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp_dma);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp_dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}
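
/*
 * Page read: for each 512 byte subpage, trigger Auto Decode so the hardware
 * corrects the data in its buffer, account ECC results from MLC_ISR, then
 * drain 512 data bytes plus 16 OOB bytes from MLC_BUFF (via DMA or PIO).
 */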
static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, j;
	uint8_t *oobbuf = chip->oob_poi;
	uint32_t mlc_isr;
	int res;
	uint8_t *dma_buf;
	bool dma_mapped;

	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->dma_buf;
		dma_mapped = false;
	}

	/* Writing Command and Address */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	/* For all sub-pages */
	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Auto Decode Command */
		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);

		/* Check ECC Error status */
		mlc_isr = readl(MLC_ISR(host->io_base));
		if (mlc_isr & MLCISR_DECODER_FAILURE) {
			mtd->ecc_stats.failed++;
			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
		}

		/* Read 512 + 16 Bytes */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_DEV_TO_MEM);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				*((uint32_t *)(buf)) =
					readl(MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		for (j = 0; j < (16 >> 2); j++) {
			*((uint32_t *)(oobbuf)) =
				readl(MLC_BUFF(host->io_base));
			oobbuf += 4;
		}
	}

	if (use_dma && !dma_mapped)
		memcpy(buf, dma_buf, mtd->writesize);

	return 0;
}
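
/*
 * Page write: for each 512 byte subpage, load 512 data bytes plus 6 user OOB
 * bytes into MLC_BUFF, then start Auto Encode so the hardware generates and
 * appends the remaining ECC bytes itself.
 */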
static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
				       struct nand_chip *chip,
				       const uint8_t *buf, int oob_required,
				       int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	const uint8_t *oobbuf = chip->oob_poi;
	uint8_t *dma_buf = (uint8_t *)buf;
	int res;
	int i, j;

	if (use_dma && (void *)buf >= high_memory) {
		dma_buf = host->dma_buf;
		memcpy(dma_buf, buf, mtd->writesize);
	}

	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Encode */
		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

		/* Write 512 + 6 Bytes to Buffer */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_MEM_TO_DEV);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				writel(*((uint32_t *)(buf)),
				       MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 4;
		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 12;

		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);
	}

	return 0;
}
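
/*
 * OOB-only read: the MLC controller always decodes whole pages, so read the
 * full page into a scratch buffer and let lpc32xx_read_page() fill
 * chip->oob_poi as a side effect.
 */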
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Read whole page - necessary with MLC controller! */
	lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);

	return 0;
}

static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			     int page)
{
	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
	return 0;
}

/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
	/* Always enabled! */
}
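
/*
 * Request and configure the DMA channel used for MLC_BUFF transfers; only
 * called when use_dma is enabled.
 */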
static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-mlc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	/*
	 * Set direction to a sensible value even if the dmaengine driver
	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
	 * driver criticizes it as "alien transfer direction".
	 */
	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 128;
	host->dma_slave_config.dst_maxburst = 128;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		goto out1;
	}

	return 0;
out1:
	dma_release_channel(host->dma_chan);
	return -ENXIO;
}
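
/*
 * Parse timing parameters and the write-protect GPIO from the device tree;
 * all seven timing properties are mandatory.
 */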
static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
	struct lpc32xx_nand_cfg_mlc *ncfg;
	struct device_node *np = dev->of_node;

	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
	if (!ncfg)
		return NULL;

	of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
	of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
	of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
	of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
	of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
	of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
	of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);

	if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
	    !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
	    !ncfg->wr_low) {
		dev_err(dev, "chip parameters not specified correctly\n");
		return NULL;
	}

	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

	return ncfg;
}

/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	host->io_base_phy = rc->start;

	nand_chip = &host->nand_chip;
	mtd = nand_to_mtd(nand_chip);
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
	    gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	/* link the private data structures */
	nand_set_controller_data(nand_chip, host);
	nand_set_flash_node(nand_chip, pdev->dev.of_node);
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock initialization failure\n");
		res = -ENOENT;
		goto err_exit1;
	}
	clk_prepare_enable(host->clk);

	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->dev_ready = lpc32xx_nand_device_ready;
	nand_chip->chip_delay = 25; /* us */
	nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
	nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* Initialize function pointers */
	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
	nand_chip->ecc.read_page = lpc32xx_read_page;
	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_oob = lpc32xx_write_oob;
	nand_chip->ecc.read_oob = lpc32xx_read_oob;
	nand_chip->ecc.strength = 4;
	nand_chip->waitfunc = lpc32xx_waitfunc;

	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	nand_chip->bbt_td = &lpc32xx_nand_bbt;
	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

	if (use_dma) {
		res = lpc32xx_dma_setup(host);
		if (res) {
			res = -EIO;
			goto err_exit2;
		}
	}

	/*
	 * Scan to find existence of the device and
	 * get the type of NAND device: SMALL block or LARGE block.
	 */
	if (nand_scan_ident(mtd, 1, NULL)) {
		res = -ENXIO;
		goto err_exit3;
	}

	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dma_buf) {
		res = -ENOMEM;
		goto err_exit3;
	}

	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dummy_buf) {
		res = -ENOMEM;
		goto err_exit3;
	}

	nand_chip->ecc.mode = NAND_ECC_HW;
	nand_chip->ecc.size = 512;
	nand_chip->ecc.layout = &lpc32xx_nand_oob;
	host->mlcsubpages = mtd->writesize / 512;

	/* initially clear interrupt status */
	readb(MLC_IRQ_SR(host->io_base));

	init_completion(&host->comp_nand);
	init_completion(&host->comp_controller);

	host->irq = platform_get_irq(pdev, 0);
	if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
		dev_err(&pdev->dev, "failed to get platform irq\n");
		res = -EINVAL;
		goto err_exit3;
	}

	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
		res = -ENXIO;
		goto err_exit3;
	}

	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 * and scans for a bad block table if appropriate.
	 */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_exit4;
	}

	mtd->name = DRV_NAME;

	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (!res)
		return res;

	nand_release(mtd);

err_exit4:
	free_irq(host->irq, host);
err_exit3:
	if (use_dma)
		dma_release_channel(host->dma_chan);
err_exit2:
	clk_disable_unprepare(host->clk);
	clk_put(host->clk);
err_exit1:
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return res;
}
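
/*
 * Remove NAND interface
 */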
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);

	nand_release(mtd);
	free_irq(host->irq, host);
	if (use_dma)
		dma_release_channel(host->dma_chan);

	clk_disable_unprepare(host->clk);
	clk_put(host->clk);

	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return 0;
}

#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Re-enable NAND clock */
	clk_prepare_enable(host->clk);

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable NAND clock */
	clk_disable_unprepare(host->clk);

	return 0;
}
#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif

static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-mlc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= DRV_NAME,
		.of_match_table = lpc32xx_nand_match,
	},
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");