// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NXP LPC32XX NAND SLC driver
 *
 * Authors:
 *    Kevin Wells <kevin.wells@nxp.com>
 *    Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 NXP Semiconductors
 * Copyright © 2012 Roland Stigge
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_slc.h>

#define LPC32XX_MODNAME		"lpc32xx-nand"
/**********************************************************************
 * SLC NAND controller register offsets
 **********************************************************************/

#define SLC_DATA(x)		(x + 0x000)
#define SLC_ADDR(x)		(x + 0x004)
#define SLC_CMD(x)		(x + 0x008)
#define SLC_STOP(x)		(x + 0x00C)
#define SLC_CTRL(x)		(x + 0x010)
#define SLC_CFG(x)		(x + 0x014)
#define SLC_STAT(x)		(x + 0x018)
#define SLC_INT_STAT(x)		(x + 0x01C)
#define SLC_IEN(x)		(x + 0x020)
#define SLC_ISR(x)		(x + 0x024)
#define SLC_ICR(x)		(x + 0x028)
#define SLC_TAC(x)		(x + 0x02C)
#define SLC_TC(x)		(x + 0x030)
#define SLC_ECC(x)		(x + 0x034)
#define SLC_DMA_DATA(x)		(x + 0x038)
/**********************************************************************
 * slc_ctrl register definitions
 **********************************************************************/
#define SLCCTRL_SW_RESET	(1 << 2) /* Reset the NAND controller bit */
#define SLCCTRL_ECC_CLEAR	(1 << 1) /* Reset ECC bit */
#define SLCCTRL_DMA_START	(1 << 0) /* Start DMA channel bit */
/**********************************************************************
 * slc_cfg register definitions
 **********************************************************************/
#define SLCCFG_CE_LOW		(1 << 5) /* Force CE low bit */
#define SLCCFG_DMA_ECC		(1 << 4) /* Enable DMA ECC bit */
#define SLCCFG_ECC_EN		(1 << 3) /* ECC enable bit */
#define SLCCFG_DMA_BURST	(1 << 2) /* DMA burst bit */
#define SLCCFG_DMA_DIR		(1 << 1) /* DMA write(0)/read(1) bit */
#define SLCCFG_WIDTH		(1 << 0) /* External device width, 0=8bit */
/**********************************************************************
 * slc_stat register definitions
 **********************************************************************/
#define SLCSTAT_DMA_FIFO	(1 << 2) /* DMA FIFO has data bit */
#define SLCSTAT_SLC_FIFO	(1 << 1) /* SLC FIFO has data bit */
#define SLCSTAT_NAND_READY	(1 << 0) /* NAND device is ready bit */
/**********************************************************************
 * slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
 **********************************************************************/
#define SLCSTAT_INT_TC		(1 << 1) /* Transfer count bit */
#define SLCSTAT_INT_RDY_EN	(1 << 0) /* Ready interrupt bit */
/**********************************************************************
 * slc_tac register definitions
 **********************************************************************/
/* Computation of clock cycles on basis of controller and device clock rates */
#define SLCTAC_CLOCKS(c, n, s)	(min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << (s))

/* Clock setting for RDY write sample wait time in 2*n clocks */
#define SLCTAC_WDR(n)		(((n) & 0xF) << 28)
/* Write pulse width in clock cycles, 1 to 16 clocks */
#define SLCTAC_WWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 24))
/* Write hold time of control and data signals, 1 to 16 clocks */
#define SLCTAC_WHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 20))
/* Write setup time of control and data signals, 1 to 16 clocks */
#define SLCTAC_WSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 16))
/* Clock setting for RDY read sample wait time in 2*n clocks */
#define SLCTAC_RDR(n)		(((n) & 0xF) << 12)
/* Read pulse width in clock cycles, 1 to 16 clocks */
#define SLCTAC_RWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 8))
/* Read hold time of control and data signals, 1 to 16 clocks */
#define SLCTAC_RHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 4))
/* Read setup time of control and data signals, 1 to 16 clocks */
#define SLCTAC_RSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 0))
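
/*
 * Worked example (illustrative numbers, not taken from a datasheet): with a
 * 104 MHz SLC base clock (c) and a requested 40 MHz write pulse rate (n),
 * SLCTAC_WWIDTH(c, n) programs min(DIV_ROUND_UP(104000000, 40000000) - 1, 0xF)
 * = 2 into bits 27:24 of the TAC register, i.e. a 3-clock (~29 ns) pulse.
 * The clamp to 0xF means no field can ever request more than 16 clocks.
 */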
/**********************************************************************
 * slc_ecc register definitions
 **********************************************************************/
/* ECC line parity fetch macro */
#define SLCECC_TO_LINEPAR(n)	(((n) >> 6) & 0x7FFF)
#define SLCECC_TO_COLPAR(n)	((n) & 0x3F)
/*
 * DMA requires storage space for the DMA local buffer and the hardware ECC
 * storage area. The DMA local buffer is only used if DMA mapping fails.
 */
#define LPC32XX_DMA_DATA_SIZE		4096
#define LPC32XX_ECC_SAVE_SIZE		((4096 / 256) * 4)

/* Number of bytes used for ECC stored in NAND per 256 bytes */
#define LPC32XX_SLC_DEV_ECC_BYTES	3
/*
 * If the NAND base clock frequency can't be fetched, this frequency will be
 * used instead as the base. This rate is used to setup the timing registers
 * used for NAND accesses.
 */
#define LPC32XX_DEF_BUS_RATE		133250000
/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
#define LPC32XX_DMA_TIMEOUT		100
/*
 * NAND ECC Layout for small page NAND devices
 * Note: For large and huge page devices, the default layouts are used
 */
static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->length = 6;
	oobregion->offset = 10;

	return 0;
}

static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 4;
	} else {
		oobregion->offset = 6;
		oobregion->length = 4;
	}

	return 0;
}

static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
	.ecc = lpc32xx_ooblayout_ecc,
	.free = lpc32xx_ooblayout_free,
};
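
/*
 * Resulting small-page (16-byte OOB) map implied by the two callbacks above:
 * bytes 0-3 and 6-9 are free for clients, bytes 10-15 hold the six ECC bytes
 * (two 256-byte steps times 3 bytes each). Byte 5 is assumed to stay reserved
 * for the factory bad block marker, per the usual small-page convention.
 */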
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };

/*
 * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
 * Note: Large page devices used the default layout
 */
static struct nand_bbt_descr bbt_smallpage_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 0,
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 0,
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = mirror_pattern
};
/*
 * NAND platform configuration structure
 */
struct lpc32xx_nand_cfg_slc {
	uint32_t wdr_clks;
	uint32_t wwidth;
	uint32_t whold;
	uint32_t wsetup;
	uint32_t rdr_clks;
	uint32_t rwidth;
	uint32_t rhold;
	uint32_t rsetup;
	int wp_gpio;
	struct mtd_partition *parts;
	unsigned num_parts;
};
struct lpc32xx_nand_host {
	struct nand_chip	nand_chip;
	struct lpc32xx_slc_platform_data *pdata;
	struct clk		*clk;
	void __iomem		*io_base;
	struct lpc32xx_nand_cfg_slc *ncfg;

	struct completion	comp;
	struct dma_chan		*dma_chan;
	uint32_t		dma_buf_len;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;

	/*
	 * DMA and CPU addresses of ECC work area and data buffer
	 */
	uint32_t		*ecc_buf;
	uint8_t			*data_buf;
	dma_addr_t		io_base_dma;
};
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset SLC controller */
	writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));

	writel(0, SLC_CFG(host->io_base));
	writel(0, SLC_IEN(host->io_base));
	writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
	       SLC_ICR(host->io_base));

	/* Get base clock for SLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = LPC32XX_DEF_BUS_RATE;

	/* Compute clock setup values */
	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
	      SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
	      SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
	      SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
	      SLCTAC_RDR(host->ncfg->rdr_clks) |
	      SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
	      SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
	      SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
	writel(tmp, SLC_TAC(host->io_base));
}
/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
				  unsigned int ctrl)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Does CE state need to be changed? */
	tmp = readl(SLC_CFG(host->io_base));
	if (ctrl & NAND_NCE)
		tmp |= SLCCFG_CE_LOW;
	else
		tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CFG(host->io_base));

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writel(cmd, SLC_CMD(host->io_base));
		else
			writel(cmd, SLC_ADDR(host->io_base));
	}
}
/*
 * Read the Device Ready pin
 */
static int lpc32xx_nand_device_ready(struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int rdy = 0;

	if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
		rdy = 1;

	return rdy;
}
/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}
/*
 * Prepares SLC for transfers with H/W ECC enabled
 */
static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
{
	/* Hardware ECC is enabled automatically in hardware as needed */
}
/*
 * Calculates the ECC for the data
 */
static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
				      const unsigned char *buf,
				      unsigned char *code)
{
	/*
	 * ECC is calculated automatically in hardware during syndrome read
	 * and write operations, so it doesn't need to be calculated here.
	 */
	return 0;
}
/*
 * Read a single byte from NAND device
 */
static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	return (uint8_t)readl(SLC_DATA(host->io_base));
}
/*
 * Simple device read without ECC
 */
static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Direct device read with no ECC */
	while (len-- > 0)
		*buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
}
/*
 * Simple device write without ECC
 */
static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
				   int len)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Direct device write with no ECC */
	while (len-- > 0)
		writel((uint32_t)*buf++, SLC_DATA(host->io_base));
}
/*
 * Read the OOB data from the device without ECC using FIFO method
 */
static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
 * Write the OOB data to the device without ECC using FIFO method
 */
static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
/*
 * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
 */
static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
{
	int i;

	for (i = 0; i < (count * 3); i += 3) {
		uint32_t ce = ecc[i / 3];

		ce = ~(ce << 2) & 0xFFFFFF;
		spare[i + 2] = (uint8_t)(ce & 0xFF);
		ce >>= 8;
		spare[i + 1] = (uint8_t)(ce & 0xFF);
		ce >>= 8;
		spare[i] = (uint8_t)(ce & 0xFF);
	}
}
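
/*
 * Note on the conversion above (inferred from the code, not from NXP
 * documentation): the SLC engine reports line/column parity in the low bits
 * of SLC_ECC; shifting left by two and inverting produces the three bytes per
 * 256-byte step, most significant byte first, that the Hamming corrector is
 * handed in the read path.
 */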
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}
static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
			    void *mem, int len, enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	host->dma_slave_config.direction = dir;
	host->dma_slave_config.src_addr = dma;
	host->dma_slave_config.dst_addr = dma;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 4;
	host->dma_slave_config.dst_maxburst = 4;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		return -ENXIO;
	}

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);

	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}
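
/*
 * Both src_addr and dst_addr in the slave config above are programmed with
 * the same SLC FIFO address; the dmaengine core only uses the device-side
 * member for the direction given in 'dir', so programming both lets this one
 * helper serve reads and writes alike.
 */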
/*
 * DMA read/write transfers with ECC support
 */
static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
			int read)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, status = 0;
	unsigned long timeout;
	int res;
	enum dma_transfer_direction dir =
		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	uint8_t *dma_buf;
	bool dma_mapped;

	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->data_buf;
		dma_mapped = false;
		if (!read)
			memcpy(host->data_buf, buf, mtd->writesize);
	}

	if (read) {
		writel(readl(SLC_CFG(host->io_base)) |
		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
	} else {
		writel((readl(SLC_CFG(host->io_base)) |
			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
		       ~SLCCFG_DMA_DIR,
		       SLC_CFG(host->io_base));
	}

	/* Clear initial ECC */
	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	/* Transfer size is data area only */
	writel(mtd->writesize, SLC_TC(host->io_base));

	/* Start transfer in the NAND controller */
	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));

	for (i = 0; i < chip->ecc.steps; i++) {
		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
				       dma_buf + i * chip->ecc.size,
				       mtd->writesize / chip->ecc.steps, dir);
		if (res)
			return res;

		/* Always _read_ ECC */
		if (i == chip->ecc.steps - 1)
			break;
		if (!read) /* ECC availability delayed on write */
			udelay(10);
		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
		if (res)
			return res;
	}

	/*
	 * According to NXP, the DMA can be finished here, but the NAND
	 * controller may still have buffered data. After porting to using the
	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
	 * appears to be always true, according to tests. Keeping the check for
	 * safety reasons for now.
	 */
	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
		       time_before(jiffies, timeout))
			cpu_relax();
		if (!time_before(jiffies, timeout)) {
			dev_err(mtd->dev.parent, "FIFO held data too long\n");
			status = -EIO;
		}
	}

	/* Read last calculated ECC value */
	if (!read)
		udelay(10);
	host->ecc_buf[chip->ecc.steps - 1] =
		readl(SLC_ECC(host->io_base));

	dmaengine_terminate_all(host->dma_chan);

	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
	    readl(SLC_TC(host->io_base))) {
		/* Something is left in the FIFO, something is wrong */
		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
		status = -EIO;
	}

	/* Stop DMA & HW ECC */
	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));
	writel(readl(SLC_CFG(host->io_base)) &
	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));

	if (!dma_mapped && read)
		memcpy(buf, host->data_buf, mtd->writesize);

	return status;
}
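
/*
 * The return value of lpc32xx_xfer() only reflects DMA/FIFO health; whether
 * the data was actually error free is decided later, when the read path runs
 * chip->ecc.correct() against the parity words captured in host->ecc_buf.
 */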
/*
 * Read the data and OOB data from the device, use ECC correction with the
 * data, disable ECC for the OOB data
 */
static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
					   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct mtd_oob_region oobregion = { };
	int stat, i, status, error;
	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];

	/* Issue read command */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* Read data and oob, calculate ECC */
	status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);

	/* Get OOB data */
	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);

	/* Convert to stored ECC format */
	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);

	/* Pointer to ECC data retrieved from NAND spare area */
	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
	if (error)
		return error;

	oobecc = chip->oob_poi + oobregion.offset;

	for (i = 0; i < chip->ecc.steps; i++) {
		stat = chip->ecc.correct(chip, buf, oobecc,
					 &tmpecc[i * chip->ecc.bytes]);
		if (stat < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += stat;

		buf += chip->ecc.size;
		oobecc += chip->ecc.bytes;
	}

	return status;
}
/*
 * Read the data and OOB data from the device, no ECC correction with the
 * data or OOB data
 */
static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
					       uint8_t *buf, int oob_required,
					       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Issue read command */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* Raw reads can just use the FIFO interface */
	chip->legacy.read_buf(chip, buf, chip->ecc.size * chip->ecc.steps);
	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);

	return 0;
}
/*
 * Write the data and OOB data to the device, use ECC with the data,
 * disable ECC for the OOB data
 */
static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
					    const uint8_t *buf,
					    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct mtd_oob_region oobregion = { };
	uint8_t *pb;
	int error;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	/* Write data, calculate ECC on outbound data */
	error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
	if (error)
		return error;

	/*
	 * The calculated ECC needs some manual work done to it before
	 * committing it to NAND. Process the calculated ECC and place
	 * the resultant values directly into the OOB buffer.
	 */
	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
	if (error)
		return error;

	pb = chip->oob_poi + oobregion.offset;
	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);

	/* Write ECC data to device */
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);

	return nand_prog_page_end_op(chip);
}
/*
 * Write the data and OOB data to the device, no ECC correction with the
 * data or OOB data
 */
static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
						const uint8_t *buf,
						int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Raw writes can just use the FIFO interface */
	nand_prog_page_begin_op(chip, page, 0, buf,
				chip->ecc.size * chip->ecc.steps);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);

	return nand_prog_page_end_op(chip);
}
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-slc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	return 0;
}
static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
{
	struct lpc32xx_nand_cfg_slc *ncfg;
	struct device_node *np = dev->of_node;

	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
	if (!ncfg)
		return NULL;

	of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
	of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
	of_property_read_u32(np, "nxp,whold", &ncfg->whold);
	of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
	of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
	of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
	of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
	of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);

	if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
	    !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
	    !ncfg->rhold || !ncfg->rsetup) {
		dev_err(dev, "chip parameters not specified correctly\n");
		return NULL;
	}

	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

	return ncfg;
}
static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
		return 0;

	/* OOB and ECC CPU and DMA work areas */
	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);

	/*
	 * Small page FLASH has a unique OOB layout, but large and huge
	 * page FLASH use the standard layout. Small page FLASH uses a
	 * custom BBT marker layout.
	 */
	if (mtd->writesize <= 512)
		mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);

	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
	/* These sizes remain the same regardless of page size */
	chip->ecc.size = 256;
	chip->ecc.strength = 1;
	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
	chip->ecc.prepad = 0;
	chip->ecc.postpad = 0;
	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
	chip->ecc.correct = rawnand_sw_hamming_correct;
	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;

	/*
	 * Use a custom BBT marker setup for small page FLASH that
	 * won't interfere with the ECC layout. Large and huge page
	 * FLASH use the standard layout.
	 */
	if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
	    mtd->writesize <= 512) {
		chip->bbt_td = &bbt_smallpage_main_descr;
		chip->bbt_md = &bbt_smallpage_mirror_descr;
	}

	return 0;
}
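
/*
 * With ecc.size = 256, ecc.bytes = 3 and ecc.strength = 1, the controller
 * provides single-bit-correcting Hamming protection per 256-byte step, so a
 * 512-byte small page uses 2 steps and 6 ECC bytes, which is exactly the
 * 6-byte region at offset 10 declared in lpc32xx_ooblayout_ecc() above.
 */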
static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
	.attach_chip = lpc32xx_nand_attach_chip,
};
/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	host->io_base_dma = rc->start;
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}

	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
			host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	chip = &host->nand_chip;
	mtd = nand_to_mtd(chip);
	nand_set_controller_data(chip, host);
	nand_set_flash_node(chip, pdev->dev.of_node);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock failure\n");
		res = -ENOENT;
		goto enable_wp;
	}
	res = clk_prepare_enable(host->clk);
	if (res)
		goto enable_wp;

	/* Set NAND IO addresses and command/ready functions */
	chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
	chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
	chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	chip->legacy.dev_ready = lpc32xx_nand_device_ready;
	chip->legacy.chip_delay = 20; /* 20us command delay time */

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* NAND callbacks for LPC32xx SLC hardware */
	chip->legacy.read_byte = lpc32xx_nand_read_byte;
	chip->legacy.read_buf = lpc32xx_nand_read_buf;
	chip->legacy.write_buf = lpc32xx_nand_write_buf;

	/*
	 * Allocate a large enough buffer for a single huge page plus
	 * extra space for the spare area and ECC storage area
	 */
	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
				      GFP_KERNEL);
	if (host->data_buf == NULL) {
		res = -ENOMEM;
		goto unprepare_clk;
	}

	res = lpc32xx_nand_dma_setup(host);
	if (res) {
		res = -EIO;
		goto unprepare_clk;
	}

	/* Find NAND device */
	chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
	res = nand_scan(chip, 1);
	if (res)
		goto release_dma;

	mtd->name = "nxp_lpc3220_slc";
	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (res)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(chip);
release_dma:
	dma_release_channel(host->dma_chan);
unprepare_clk:
	clk_disable_unprepare(host->clk);
enable_wp:
	lpc32xx_wp_enable(host);

	return res;
}
/*
 * Remove NAND device.
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->nand_chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	dma_release_channel(host->dma_chan);

	tmp = readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CTRL(host->io_base));

	clk_disable_unprepare(host->clk);
	lpc32xx_wp_enable(host);

	return 0;
}
#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	int ret;

	/* Re-enable NAND clock */
	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}
static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	tmp = readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CTRL(host->io_base));

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	clk_disable_unprepare(host->clk);

	return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif
static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-slc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= LPC32XX_MODNAME,
		.of_match_table = lpc32xx_nand_match,
	},
};

module_platform_driver(lpc32xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");