// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NXP LPC32XX NAND SLC driver
 *
 * Kevin Wells <kevin.wells@nxp.com>
 * Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 NXP Semiconductors
 * Copyright © 2012 Roland Stigge
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_slc.h>
#define LPC32XX_MODNAME		"lpc32xx-nand"

/**********************************************************************
* SLC NAND controller register offsets
**********************************************************************/

#define SLC_DATA(x)		(x + 0x000)
#define SLC_ADDR(x)		(x + 0x004)
#define SLC_CMD(x)		(x + 0x008)
#define SLC_STOP(x)		(x + 0x00C)
#define SLC_CTRL(x)		(x + 0x010)
#define SLC_CFG(x)		(x + 0x014)
#define SLC_STAT(x)		(x + 0x018)
#define SLC_INT_STAT(x)		(x + 0x01C)
#define SLC_IEN(x)		(x + 0x020)
#define SLC_ISR(x)		(x + 0x024)
#define SLC_ICR(x)		(x + 0x028)
#define SLC_TAC(x)		(x + 0x02C)
#define SLC_TC(x)		(x + 0x030)
#define SLC_ECC(x)		(x + 0x034)
#define SLC_DMA_DATA(x)		(x + 0x038)
/**********************************************************************
* slc_ctrl register definitions
**********************************************************************/
#define SLCCTRL_SW_RESET	(1 << 2) /* Reset the NAND controller bit */
#define SLCCTRL_ECC_CLEAR	(1 << 1) /* Reset ECC bit */
#define SLCCTRL_DMA_START	(1 << 0) /* Start DMA channel bit */

/**********************************************************************
* slc_cfg register definitions
**********************************************************************/
#define SLCCFG_CE_LOW		(1 << 5) /* Force CE low bit */
#define SLCCFG_DMA_ECC		(1 << 4) /* Enable DMA ECC bit */
#define SLCCFG_ECC_EN		(1 << 3) /* ECC enable bit */
#define SLCCFG_DMA_BURST	(1 << 2) /* DMA burst bit */
#define SLCCFG_DMA_DIR		(1 << 1) /* DMA write(0)/read(1) bit */
#define SLCCFG_WIDTH		(1 << 0) /* External device width, 0=8bit */

/**********************************************************************
* slc_stat register definitions
**********************************************************************/
#define SLCSTAT_DMA_FIFO	(1 << 2) /* DMA FIFO has data bit */
#define SLCSTAT_SLC_FIFO	(1 << 1) /* SLC FIFO has data bit */
#define SLCSTAT_NAND_READY	(1 << 0) /* NAND device is ready bit */

/**********************************************************************
* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
**********************************************************************/
#define SLCSTAT_INT_TC		(1 << 1) /* Transfer count bit */
#define SLCSTAT_INT_RDY_EN	(1 << 0) /* Ready interrupt bit */
/**********************************************************************
* slc_tac register definitions
**********************************************************************/
/* Computation of clock cycles on the basis of controller and device clock rates */
#define SLCTAC_CLOCKS(c, n, s)	(min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)

/* Clock setting for RDY write sample wait time in 2*n clocks */
#define SLCTAC_WDR(n)		(((n) & 0xF) << 28)
/* Write pulse width in clock cycles, 1 to 16 clocks */
#define SLCTAC_WWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 24))
/* Write hold time of control and data signals, 1 to 16 clocks */
#define SLCTAC_WHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 20))
/* Write setup time of control and data signals, 1 to 16 clocks */
#define SLCTAC_WSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 16))
/* Clock setting for RDY read sample wait time in 2*n clocks */
#define SLCTAC_RDR(n)		(((n) & 0xF) << 12)
/* Read pulse width in clock cycles, 1 to 16 clocks */
#define SLCTAC_RWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 8))
/* Read hold time of control and data signals, 1 to 16 clocks */
#define SLCTAC_RHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 4))
/* Read setup time of control and data signals, 1 to 16 clocks */
#define SLCTAC_RSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 0))
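
/*
 * Worked example (illustrative numbers, not taken from any particular board):
 * with the fallback base clock of 133250000 Hz and an "nxp,wwidth" value of
 * 40000000 (a requested ~25 ns write pulse), SLCTAC_WWIDTH() programs
 * DIV_ROUND_UP(133250000, 40000000) - 1 = 3, i.e. a pulse of 4 SLC clocks
 * (~30 ns). Each field is clamped to 0xF, so the longest programmable time
 * is 16 clocks.
 */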
/**********************************************************************
* slc_ecc register definitions
**********************************************************************/
/* ECC line parity fetch macro */
#define SLCECC_TO_LINEPAR(n)	(((n) >> 6) & 0x7FFF)
#define SLCECC_TO_COLPAR(n)	((n) & 0x3F)
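
/*
 * The raw slc_ecc value packs the column parity in bits [5:0] and the line
 * parity in bits [20:6]; the two macros above extract those fields.
 */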
/*
 * DMA requires storage space for the DMA local buffer and the hardware ECC
 * storage area. The DMA local buffer is only used if DMA mapping fails
 */
#define LPC32XX_DMA_DATA_SIZE		4096
#define LPC32XX_ECC_SAVE_SIZE		((4096 / 256) * 4)

/* Number of bytes used for ECC stored in NAND per 256 bytes */
#define LPC32XX_SLC_DEV_ECC_BYTES	3
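
/*
 * The save area holds one 32-bit slc_ecc snapshot per 256-byte chunk of the
 * largest supported (4096-byte) page, i.e. 16 * 4 bytes; each snapshot is
 * later condensed to the 3 on-flash ECC bytes by lpc32xx_slc_ecc_copy().
 */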
/*
 * If the NAND base clock frequency can't be fetched, this frequency will be
 * used instead as the base. This rate is used to setup the timing registers
 * used for NAND accesses.
 */
#define LPC32XX_DEF_BUS_RATE		133250000

/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
#define LPC32XX_DMA_TIMEOUT		100
/*
 * NAND ECC Layout for small page NAND devices
 * Note: For large and huge page devices, the default layouts are used
 */
static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->length = 6;
	oobregion->offset = 10;

	return 0;
}
static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 4;
	} else {
		oobregion->offset = 6;
		oobregion->length = 4;
	}

	return 0;
}
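
/*
 * Resulting 16-byte OOB map for small page devices: bytes 0-3 and 6-9 are
 * free (and double as the BBT marker/version area, see the descriptors
 * below), bytes 10-15 hold the six ECC bytes, and bytes 4-5 are skipped
 * (byte 5 is the factory bad block marker position on small page parts).
 */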
static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
	.ecc = lpc32xx_ooblayout_ecc,
	.free = lpc32xx_ooblayout_free,
};

static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
/*
 * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
 * Note: Large page devices use the default layout
 */
static struct nand_bbt_descr bbt_smallpage_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 0,
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 0,
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = mirror_pattern
};
/*
 * NAND platform configuration structure
 */
struct lpc32xx_nand_cfg_slc {
	uint32_t wdr_clks;
	uint32_t wwidth;
	uint32_t whold;
	uint32_t wsetup;
	uint32_t rdr_clks;
	uint32_t rwidth;
	uint32_t rhold;
	uint32_t rsetup;
	int wp_gpio;
	struct mtd_partition *parts;
	unsigned num_parts;
};

struct lpc32xx_nand_host {
	struct nand_chip	nand_chip;
	struct lpc32xx_slc_platform_data *pdata;
	struct clk		*clk;
	void __iomem		*io_base;
	struct lpc32xx_nand_cfg_slc *ncfg;

	struct completion	comp;
	struct dma_chan		*dma_chan;
	uint32_t		dma_buf_len;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;

	/*
	 * DMA and CPU addresses of ECC work area and data buffer
	 */
	uint32_t		*ecc_buf;
	uint8_t			*data_buf;
	dma_addr_t		io_base_dma;
};
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset SLC controller */
	writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
	udelay(1000);

	/* Basic setup */
	writel(0, SLC_CFG(host->io_base));
	writel(0, SLC_IEN(host->io_base));
	writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
	       SLC_ICR(host->io_base));

	/* Get base clock for SLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = LPC32XX_DEF_BUS_RATE;

	/* Compute clock setup values */
	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
	      SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
	      SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
	      SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
	      SLCTAC_RDR(host->ncfg->rdr_clks) |
	      SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
	      SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
	      SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
	writel(tmp, SLC_TAC(host->io_base));
}
/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
				  unsigned int ctrl)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Does CE state need to be changed? */
	tmp = readl(SLC_CFG(host->io_base));
	if (ctrl & NAND_NCE)
		tmp |= SLCCFG_CE_LOW;
	else
		tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CFG(host->io_base));

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writel(cmd, SLC_CMD(host->io_base));
		else
			writel(cmd, SLC_ADDR(host->io_base));
	}
}
/*
 * Read the Device Ready pin
 */
static int lpc32xx_nand_device_ready(struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int rdy = 0;

	if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
		rdy = 1;

	return rdy;
}
/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}
/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}
/*
 * Prepares SLC for transfers with H/W ECC enabled
 */
static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
{
	/* Hardware ECC is enabled automatically in hardware as needed */
}
/*
 * Calculates the ECC for the data
 */
static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
				      const unsigned char *buf,
				      unsigned char *code)
{
	/*
	 * ECC is calculated automatically in hardware during syndrome read
	 * and write operations, so it doesn't need to be calculated here.
	 */
	return 0;
}
/*
 * Read a single byte from NAND device
 */
static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	return (uint8_t)readl(SLC_DATA(host->io_base));
}
/*
 * Simple device read without ECC
 */
static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Direct device read with no ECC */
	while (len-- > 0)
		*buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
}
/*
 * Simple device write without ECC
 */
static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
				   int len)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Direct device write with no ECC */
	while (len-- > 0)
		writel((uint32_t)*buf++, SLC_DATA(host->io_base));
}
/*
 * Read the OOB data from the device without ECC using FIFO method
 */
static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
 * Write the OOB data to the device without ECC using FIFO method
 */
static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
/*
 * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
 */
static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
{
	int i;

	for (i = 0; i < (count * 3); i += 3) {
		uint32_t ce = ecc[i / 3];

		ce = ~(ce << 2) & 0xFFFFFF;
		spare[i + 2] = (uint8_t)(ce & 0xFF);
		ce >>= 8;
		spare[i + 1] = (uint8_t)(ce & 0xFF);
		ce >>= 8;
		spare[i] = (uint8_t)(ce & 0xFF);
	}
}
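
/*
 * The conversion above turns each raw slc_ecc snapshot into the 3-byte format
 * the driver stores in the spare area and later feeds to nand_correct_data():
 * the register value is shifted left by two, inverted, masked to 24 bits and
 * written out most significant byte first.
 */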
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}
static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
			    void *mem, int len, enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	host->dma_slave_config.direction = dir;
	host->dma_slave_config.src_addr = dma;
	host->dma_slave_config.dst_addr = dma;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 4;
	host->dma_slave_config.dst_maxburst = 4;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		return -ENXIO;
	}

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);

	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}
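
/*
 * Note: lpc32xx_xmit_dma() programs both src_addr and dst_addr with the same
 * SLC FIFO address; the dmaengine only uses the one matching the transfer
 * direction, while the memory side is described by the single-entry
 * scatterlist. The one-second completion timeout is just a safety net, the
 * individual transfers are at most a few KiB.
 */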
/*
 * DMA read/write transfers with ECC support
 */
static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
			int read)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, status = 0;
	unsigned long timeout;
	int res;
	enum dma_transfer_direction dir =
		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	uint8_t *dma_buf;
	bool dma_mapped;

	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->data_buf;
		dma_mapped = false;
		if (!read)
			memcpy(host->data_buf, buf, mtd->writesize);
	}

	if (read) {
		writel(readl(SLC_CFG(host->io_base)) |
		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
	} else {
		writel((readl(SLC_CFG(host->io_base)) |
			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
		       ~SLCCFG_DMA_DIR,
		       SLC_CFG(host->io_base));
	}

	/* Clear initial ECC */
	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	/* Transfer size is data area only */
	writel(mtd->writesize, SLC_TC(host->io_base));

	/* Start transfer in the NAND controller */
	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));

	for (i = 0; i < chip->ecc.steps; i++) {
		/* Data */
		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
				       dma_buf + i * chip->ecc.size,
				       mtd->writesize / chip->ecc.steps, dir);
		if (res)
			return res;

		/* Always _read_ ECC */
		if (i == chip->ecc.steps - 1)
			break;
		if (!read) /* ECC availability delayed on write */
			udelay(10);
		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
		if (res)
			return res;
	}

	/*
	 * According to NXP, the DMA can be finished here, but the NAND
	 * controller may still have buffered data. After porting to the
	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
	 * appears to be always true, according to tests. Keeping the check for
	 * safety reasons for now.
	 */
	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
		       time_before(jiffies, timeout))
			cpu_relax();
		if (!time_before(jiffies, timeout)) {
			dev_err(mtd->dev.parent, "FIFO held data too long\n");
			status = -EIO;
		}
	}

	/* Read last calculated ECC value */
	if (!read)
		udelay(10);
	host->ecc_buf[chip->ecc.steps - 1] =
		readl(SLC_ECC(host->io_base));

	/* Flush DMA */
	dmaengine_terminate_all(host->dma_chan);

	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
	    readl(SLC_TC(host->io_base))) {
		/* Something is left in the FIFO, something is wrong */
		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
		status = -EIO;
	}

	/* Stop DMA & HW ECC */
	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));
	writel(readl(SLC_CFG(host->io_base)) &
	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));

	if (!dma_mapped && read)
		memcpy(buf, host->data_buf, mtd->writesize);

	return status;
}
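
/*
 * lpc32xx_xfer() moves the page in chip->ecc.steps chunks of chip->ecc.size
 * bytes through SLC_DMA_DATA. After every chunk except the last, the running
 * hardware ECC is fetched from SLC_ECC with a second, 4-byte DMA transfer;
 * the final value is read directly from the register once the FIFO has
 * drained (on writes the ECC lags the data slightly, hence the short delay).
 */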
/*
 * Read the data and OOB data from the device, use ECC correction with the
 * data, disable ECC for the OOB data
 */
static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
					   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct mtd_oob_region oobregion = { };
	int stat, i, status, error;
	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];

	/* Issue read command */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* Read data and oob, calculate ECC */
	status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);

	/* Get OOB data */
	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);

	/* Convert to stored ECC format */
	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);

	/* Pointer to ECC data retrieved from NAND spare area */
	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
	if (error)
		return error;

	oobecc = chip->oob_poi + oobregion.offset;

	for (i = 0; i < chip->ecc.steps; i++) {
		stat = chip->ecc.correct(chip, buf, oobecc,
					 &tmpecc[i * chip->ecc.bytes]);
		if (stat < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += stat;

		buf += chip->ecc.size;
		oobecc += chip->ecc.bytes;
	}

	return status;
}
/*
 * Read the data and OOB data from the device, no ECC correction with the
 * data or OOB data
 */
static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
					       uint8_t *buf, int oob_required,
					       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Issue read command */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* Raw reads can just use the FIFO interface */
	chip->legacy.read_buf(chip, buf, chip->ecc.size * chip->ecc.steps);
	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);

	return 0;
}
/*
 * Write the data and OOB data to the device, use ECC with the data,
 * disable ECC for the OOB data
 */
static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
					    const uint8_t *buf,
					    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct mtd_oob_region oobregion = { };
	uint8_t *pb;
	int error;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	/* Write data, calculate ECC on outbound data */
	error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
	if (error)
		return error;

	/*
	 * The calculated ECC needs some manual work done to it before
	 * committing it to NAND. Process the calculated ECC and place
	 * the resultant values directly into the OOB buffer.
	 */
	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
	if (error)
		return error;

	pb = chip->oob_poi + oobregion.offset;
	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);

	/* Write ECC data to device */
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);

	return nand_prog_page_end_op(chip);
}
/*
 * Write the data and OOB data to the device, no ECC correction with the
 * data or OOB data
 */
static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
						const uint8_t *buf,
						int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Raw writes can just use the FIFO interface */
	nand_prog_page_begin_op(chip, page, 0, buf,
				chip->ecc.size * chip->ecc.steps);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);

	return nand_prog_page_end_op(chip);
}
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-slc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	return 0;
}
static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
{
	struct lpc32xx_nand_cfg_slc *ncfg;
	struct device_node *np = dev->of_node;

	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
	if (!ncfg)
		return NULL;

	of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
	of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
	of_property_read_u32(np, "nxp,whold", &ncfg->whold);
	of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
	of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
	of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
	of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
	of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);

	if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
	    !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
	    !ncfg->rhold || !ncfg->rsetup) {
		dev_err(dev, "chip parameters not specified correctly\n");
		return NULL;
	}

	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

	return ncfg;
}
static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* OOB and ECC CPU and DMA work areas */
	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);

	/*
	 * Small page FLASH has a unique OOB layout, but large and huge
	 * page FLASH use the standard layout. Small page FLASH uses a
	 * custom BBT marker layout.
	 */
	if (mtd->writesize <= 512)
		mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);

	/* These sizes remain the same regardless of page size */
	chip->ecc.size = 256;
	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
	chip->ecc.prepad = 0;
	chip->ecc.postpad = 0;

	/*
	 * Use a custom BBT marker setup for small page FLASH that
	 * won't interfere with the ECC layout. Large and huge page
	 * FLASH use the standard layout.
	 */
	if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
	    mtd->writesize <= 512) {
		chip->bbt_td = &bbt_smallpage_main_descr;
		chip->bbt_md = &bbt_smallpage_mirror_descr;
	}

	return 0;
}

static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
	.attach_chip = lpc32xx_nand_attach_chip,
};
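
/*
 * With ecc.size = 256 and ecc.bytes = 3 the NAND core derives ecc.steps from
 * the page size at scan time: a 512-byte small page uses 2 steps and 6 ECC
 * bytes (matching the small page layout above), a 2048-byte page uses 8 steps
 * and 24 ECC bytes in the default large page layout.
 */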
/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	host->io_base_dma = rc->start;
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}

	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
			host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	chip = &host->nand_chip;
	mtd = nand_to_mtd(chip);
	nand_set_controller_data(chip, host);
	nand_set_flash_node(chip, pdev->dev.of_node);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock failure\n");
		res = -ENOENT;
		goto enable_wp;
	}
	res = clk_prepare_enable(host->clk);
	if (res)
		goto enable_wp;

	/* Set NAND IO addresses and command/ready functions */
	chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
	chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
	chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	chip->legacy.dev_ready = lpc32xx_nand_device_ready;
	chip->legacy.chip_delay = 20;		/* 20us command delay time */

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* NAND callbacks for LPC32xx SLC hardware */
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->legacy.read_byte = lpc32xx_nand_read_byte;
	chip->legacy.read_buf = lpc32xx_nand_read_buf;
	chip->legacy.write_buf = lpc32xx_nand_write_buf;
	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
	chip->ecc.correct = nand_correct_data;
	chip->ecc.strength = 1;
	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;

	/*
	 * Allocate a large enough buffer for a single huge page plus
	 * extra space for the spare area and ECC storage area
	 */
	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
				      GFP_KERNEL);
	if (host->data_buf == NULL) {
		res = -ENOMEM;
		goto unprepare_clk;
	}

	res = lpc32xx_nand_dma_setup(host);
	if (res) {
		res = -EIO;
		goto unprepare_clk;
	}

	/* Find NAND device */
	chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
	res = nand_scan(chip, 1);
	if (res)
		goto release_dma;

	mtd->name = "nxp_lpc3220_slc";
	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (!res)
		return res;

	nand_release(chip);

release_dma:
	dma_release_channel(host->dma_chan);
unprepare_clk:
	clk_disable_unprepare(host->clk);
enable_wp:
	lpc32xx_wp_enable(host);

	return res;
}
/*
 * Remove NAND device.
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	nand_release(&host->nand_chip);
	dma_release_channel(host->dma_chan);

	/* Force CE high */
	tmp = readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CTRL(host->io_base));

	clk_disable_unprepare(host->clk);
	lpc32xx_wp_enable(host);

	return 0;
}
#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	int ret;

	/* Re-enable NAND clock */
	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}
static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Force CE high */
	tmp = readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CTRL(host->io_base));

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable clock */
	clk_disable_unprepare(host->clk);

	return 0;
}
#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif
static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-slc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= LPC32XX_MODNAME,
		.of_match_table = lpc32xx_nand_match,
	},
};

module_platform_driver(lpc32xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");