2 * drivers/mtd/nand/pxa3xx_nand.c
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
25 #include <linux/irq.h>
26 #include <linux/slab.h>
28 #include <linux/of_device.h>
29 #include <linux/of_mtd.h>
31 #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
39 #include <linux/platform_data/mtd-nand-pxa3xx.h>
41 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
42 #define NAND_STOP_DELAY msecs_to_jiffies(40)
43 #define PAGE_CHUNK_SIZE (2048)
46 * Define a buffer size for the initial command that detects the flash device:
47 * STATUS, READID and PARAM. The largest of these is the PARAM command,
50 #define INIT_BUFFER_SIZE 256
52 /* registers and bit definitions */
53 #define NDCR (0x00) /* Control register */
54 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
55 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
56 #define NDSR (0x14) /* Status Register */
57 #define NDPCR (0x18) /* Page Count Register */
58 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
59 #define NDBDR1 (0x20) /* Bad Block Register 1 */
60 #define NDECCCTRL (0x28) /* ECC control */
61 #define NDDB (0x40) /* Data Buffer */
62 #define NDCB0 (0x48) /* Command Buffer0 */
63 #define NDCB1 (0x4C) /* Command Buffer1 */
64 #define NDCB2 (0x50) /* Command Buffer2 */
66 #define NDCR_SPARE_EN (0x1 << 31)
67 #define NDCR_ECC_EN (0x1 << 30)
68 #define NDCR_DMA_EN (0x1 << 29)
69 #define NDCR_ND_RUN (0x1 << 28)
70 #define NDCR_DWIDTH_C (0x1 << 27)
71 #define NDCR_DWIDTH_M (0x1 << 26)
72 #define NDCR_PAGE_SZ (0x1 << 24)
73 #define NDCR_NCSX (0x1 << 23)
74 #define NDCR_ND_MODE (0x3 << 21)
75 #define NDCR_NAND_MODE (0x0)
76 #define NDCR_CLR_PG_CNT (0x1 << 20)
77 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
78 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
79 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
81 #define NDCR_RA_START (0x1 << 15)
82 #define NDCR_PG_PER_BLK (0x1 << 14)
83 #define NDCR_ND_ARB_EN (0x1 << 12)
84 #define NDCR_INT_MASK (0xFFF)
86 #define NDSR_MASK (0xfff)
87 #define NDSR_ERR_CNT_OFF (16)
88 #define NDSR_ERR_CNT_MASK (0x1f)
89 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
90 #define NDSR_RDY (0x1 << 12)
91 #define NDSR_FLASH_RDY (0x1 << 11)
92 #define NDSR_CS0_PAGED (0x1 << 10)
93 #define NDSR_CS1_PAGED (0x1 << 9)
94 #define NDSR_CS0_CMDD (0x1 << 8)
95 #define NDSR_CS1_CMDD (0x1 << 7)
96 #define NDSR_CS0_BBD (0x1 << 6)
97 #define NDSR_CS1_BBD (0x1 << 5)
98 #define NDSR_UNCORERR (0x1 << 4)
99 #define NDSR_CORERR (0x1 << 3)
100 #define NDSR_WRDREQ (0x1 << 2)
101 #define NDSR_RDDREQ (0x1 << 1)
102 #define NDSR_WRCMDREQ (0x1)
104 #define NDCB0_LEN_OVRD (0x1 << 28)
105 #define NDCB0_ST_ROW_EN (0x1 << 26)
106 #define NDCB0_AUTO_RS (0x1 << 25)
107 #define NDCB0_CSEL (0x1 << 24)
108 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
109 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
110 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
111 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
112 #define NDCB0_NC (0x1 << 20)
113 #define NDCB0_DBC (0x1 << 19)
114 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
115 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
116 #define NDCB0_CMD2_MASK (0xff << 8)
117 #define NDCB0_CMD1_MASK (0xff)
118 #define NDCB0_ADDR_CYC_SHIFT (16)
120 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
121 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
122 #define EXT_CMD_TYPE_READ 4 /* Read */
123 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
124 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
125 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
126 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
128 /* macros for registers read/write */
129 #define nand_writel(info, off, val) \
130 writel_relaxed((val), (info)->mmio_base + (off))
132 #define nand_readl(info, off) \
133 readl_relaxed((info)->mmio_base + (off))
135 /* error code and state */
158 enum pxa3xx_nand_variant
{
159 PXA3XX_NAND_VARIANT_PXA
,
160 PXA3XX_NAND_VARIANT_ARMADA370
,
163 struct pxa3xx_nand_host
{
164 struct nand_chip chip
;
165 struct mtd_info
*mtd
;
168 /* page size of attached chip */
172 /* calculated from pxa3xx_nand_flash data */
173 unsigned int col_addr_cycles
;
174 unsigned int row_addr_cycles
;
175 size_t read_id_bytes
;
179 struct pxa3xx_nand_info
{
180 struct nand_hw_control controller
;
181 struct platform_device
*pdev
;
184 void __iomem
*mmio_base
;
185 unsigned long mmio_phys
;
186 struct completion cmd_complete
, dev_ready
;
188 unsigned int buf_start
;
189 unsigned int buf_count
;
190 unsigned int buf_size
;
191 unsigned int data_buff_pos
;
192 unsigned int oob_buff_pos
;
194 /* DMA information */
198 unsigned char *data_buff
;
199 unsigned char *oob_buff
;
200 dma_addr_t data_buff_phys
;
202 struct pxa_dma_desc
*data_desc
;
203 dma_addr_t data_desc_addr
;
205 struct pxa3xx_nand_host
*host
[NUM_CHIP_SELECT
];
209 * This driver supports NFCv1 (as found in PXA SoC)
210 * and NFCv2 (as found in Armada 370/XP SoC).
212 enum pxa3xx_nand_variant variant
;
215 int use_ecc
; /* use HW ECC ? */
216 int ecc_bch
; /* using BCH ECC? */
217 int use_dma
; /* use DMA ? */
218 int use_spare
; /* use spare ? */
221 unsigned int data_size
; /* data to be read from FIFO */
222 unsigned int chunk_size
; /* split commands chunk size */
223 unsigned int oob_size
;
224 unsigned int spare_size
;
225 unsigned int ecc_size
;
226 unsigned int ecc_err_cnt
;
227 unsigned int max_bitflips
;
230 /* cached register value */
235 /* generated NDCBx register values */
242 static bool use_dma
= 1;
243 module_param(use_dma
, bool, 0444);
244 MODULE_PARM_DESC(use_dma
, "enable DMA for data transferring to/from NAND HW");
246 static struct pxa3xx_nand_timing timing
[] = {
247 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
248 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
249 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
250 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
253 static struct pxa3xx_nand_flash builtin_flash_types
[] = {
254 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing
[0] },
255 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing
[1] },
256 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing
[1] },
257 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing
[1] },
258 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing
[2] },
259 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing
[2] },
260 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing
[2] },
261 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing
[2] },
262 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing
[3] },
265 static u8 bbt_pattern
[] = {'M', 'V', 'B', 'b', 't', '0' };
266 static u8 bbt_mirror_pattern
[] = {'1', 't', 'b', 'B', 'V', 'M' };
268 static struct nand_bbt_descr bbt_main_descr
= {
269 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
270 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
274 .maxblocks
= 8, /* Last 8 blocks in each chip */
275 .pattern
= bbt_pattern
278 static struct nand_bbt_descr bbt_mirror_descr
= {
279 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
280 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
284 .maxblocks
= 8, /* Last 8 blocks in each chip */
285 .pattern
= bbt_mirror_pattern
288 static struct nand_ecclayout ecc_layout_2KB_bch4bit
= {
291 32, 33, 34, 35, 36, 37, 38, 39,
292 40, 41, 42, 43, 44, 45, 46, 47,
293 48, 49, 50, 51, 52, 53, 54, 55,
294 56, 57, 58, 59, 60, 61, 62, 63},
295 .oobfree
= { {2, 30} }
298 static struct nand_ecclayout ecc_layout_4KB_bch4bit
= {
301 32, 33, 34, 35, 36, 37, 38, 39,
302 40, 41, 42, 43, 44, 45, 46, 47,
303 48, 49, 50, 51, 52, 53, 54, 55,
304 56, 57, 58, 59, 60, 61, 62, 63,
305 96, 97, 98, 99, 100, 101, 102, 103,
306 104, 105, 106, 107, 108, 109, 110, 111,
307 112, 113, 114, 115, 116, 117, 118, 119,
308 120, 121, 122, 123, 124, 125, 126, 127},
309 /* Bootrom looks in bytes 0 & 5 for bad blocks */
310 .oobfree
= { {6, 26}, { 64, 32} }
313 static struct nand_ecclayout ecc_layout_4KB_bch8bit
= {
316 32, 33, 34, 35, 36, 37, 38, 39,
317 40, 41, 42, 43, 44, 45, 46, 47,
318 48, 49, 50, 51, 52, 53, 54, 55,
319 56, 57, 58, 59, 60, 61, 62, 63},
323 /* Define a default flash type setting serve as flash detecting only */
324 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
326 #define NDTR0_tCH(c) (min((c), 7) << 19)
327 #define NDTR0_tCS(c) (min((c), 7) << 16)
328 #define NDTR0_tWH(c) (min((c), 7) << 11)
329 #define NDTR0_tWP(c) (min((c), 7) << 8)
330 #define NDTR0_tRH(c) (min((c), 7) << 3)
331 #define NDTR0_tRP(c) (min((c), 7) << 0)
333 #define NDTR1_tR(c) (min((c), 65535) << 16)
334 #define NDTR1_tWHR(c) (min((c), 15) << 4)
335 #define NDTR1_tAR(c) (min((c), 15) << 0)
337 /* convert nano-seconds to nand flash controller clock cycles */
338 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
340 static const struct of_device_id pxa3xx_nand_dt_ids
[] = {
342 .compatible
= "marvell,pxa3xx-nand",
343 .data
= (void *)PXA3XX_NAND_VARIANT_PXA
,
346 .compatible
= "marvell,armada370-nand",
347 .data
= (void *)PXA3XX_NAND_VARIANT_ARMADA370
,
351 MODULE_DEVICE_TABLE(of
, pxa3xx_nand_dt_ids
);
353 static enum pxa3xx_nand_variant
354 pxa3xx_nand_get_variant(struct platform_device
*pdev
)
356 const struct of_device_id
*of_id
=
357 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
359 return PXA3XX_NAND_VARIANT_PXA
;
360 return (enum pxa3xx_nand_variant
)of_id
->data
;
363 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host
*host
,
364 const struct pxa3xx_nand_timing
*t
)
366 struct pxa3xx_nand_info
*info
= host
->info_data
;
367 unsigned long nand_clk
= clk_get_rate(info
->clk
);
368 uint32_t ndtr0
, ndtr1
;
370 ndtr0
= NDTR0_tCH(ns2cycle(t
->tCH
, nand_clk
)) |
371 NDTR0_tCS(ns2cycle(t
->tCS
, nand_clk
)) |
372 NDTR0_tWH(ns2cycle(t
->tWH
, nand_clk
)) |
373 NDTR0_tWP(ns2cycle(t
->tWP
, nand_clk
)) |
374 NDTR0_tRH(ns2cycle(t
->tRH
, nand_clk
)) |
375 NDTR0_tRP(ns2cycle(t
->tRP
, nand_clk
));
377 ndtr1
= NDTR1_tR(ns2cycle(t
->tR
, nand_clk
)) |
378 NDTR1_tWHR(ns2cycle(t
->tWHR
, nand_clk
)) |
379 NDTR1_tAR(ns2cycle(t
->tAR
, nand_clk
));
381 info
->ndtr0cs0
= ndtr0
;
382 info
->ndtr1cs0
= ndtr1
;
383 nand_writel(info
, NDTR0CS0
, ndtr0
);
384 nand_writel(info
, NDTR1CS0
, ndtr1
);
388 * Set the data and OOB size, depending on the selected
389 * spare and ECC configuration.
390 * Only applicable to READ0, READOOB and PAGEPROG commands.
392 static void pxa3xx_set_datasize(struct pxa3xx_nand_info
*info
,
393 struct mtd_info
*mtd
)
395 int oob_enable
= info
->reg_ndcr
& NDCR_SPARE_EN
;
397 info
->data_size
= mtd
->writesize
;
401 info
->oob_size
= info
->spare_size
;
403 info
->oob_size
+= info
->ecc_size
;
407 * NOTE: it is a must to set ND_RUN firstly, then write
408 * command buffer, otherwise, it does not work.
409 * We enable all the interrupt at the same time, and
410 * let pxa3xx_nand_irq to handle all logic.
412 static void pxa3xx_nand_start(struct pxa3xx_nand_info
*info
)
416 ndcr
= info
->reg_ndcr
;
421 nand_writel(info
, NDECCCTRL
, 0x1);
423 ndcr
&= ~NDCR_ECC_EN
;
425 nand_writel(info
, NDECCCTRL
, 0x0);
431 ndcr
&= ~NDCR_DMA_EN
;
434 ndcr
|= NDCR_SPARE_EN
;
436 ndcr
&= ~NDCR_SPARE_EN
;
440 /* clear status bits and run */
441 nand_writel(info
, NDCR
, 0);
442 nand_writel(info
, NDSR
, NDSR_MASK
);
443 nand_writel(info
, NDCR
, ndcr
);
446 static void pxa3xx_nand_stop(struct pxa3xx_nand_info
*info
)
449 int timeout
= NAND_STOP_DELAY
;
451 /* wait RUN bit in NDCR become 0 */
452 ndcr
= nand_readl(info
, NDCR
);
453 while ((ndcr
& NDCR_ND_RUN
) && (timeout
-- > 0)) {
454 ndcr
= nand_readl(info
, NDCR
);
459 ndcr
&= ~NDCR_ND_RUN
;
460 nand_writel(info
, NDCR
, ndcr
);
462 /* clear status bits */
463 nand_writel(info
, NDSR
, NDSR_MASK
);
466 static void __maybe_unused
467 enable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
471 ndcr
= nand_readl(info
, NDCR
);
472 nand_writel(info
, NDCR
, ndcr
& ~int_mask
);
475 static void disable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
479 ndcr
= nand_readl(info
, NDCR
);
480 nand_writel(info
, NDCR
, ndcr
| int_mask
);
483 static void drain_fifo(struct pxa3xx_nand_info
*info
, void *data
, int len
)
489 * According to the datasheet, when reading from NDDB
490 * with BCH enabled, after each 32 bytes reads, we
491 * have to make sure that the NDSR.RDDREQ bit is set.
493 * Drain the FIFO 8 32 bits reads at a time, and skip
494 * the polling on the last read.
497 __raw_readsl(info
->mmio_base
+ NDDB
, data
, 8);
500 !(nand_readl(info
, NDSR
) & NDSR_RDDREQ
);
503 dev_err(&info
->pdev
->dev
,
504 "Timeout on RDDREQ while draining the FIFO\n");
516 __raw_readsl(info
->mmio_base
+ NDDB
, data
, len
);
519 static void handle_data_pio(struct pxa3xx_nand_info
*info
)
521 unsigned int do_bytes
= min(info
->data_size
, info
->chunk_size
);
523 switch (info
->state
) {
524 case STATE_PIO_WRITING
:
525 __raw_writesl(info
->mmio_base
+ NDDB
,
526 info
->data_buff
+ info
->data_buff_pos
,
527 DIV_ROUND_UP(do_bytes
, 4));
529 if (info
->oob_size
> 0)
530 __raw_writesl(info
->mmio_base
+ NDDB
,
531 info
->oob_buff
+ info
->oob_buff_pos
,
532 DIV_ROUND_UP(info
->oob_size
, 4));
534 case STATE_PIO_READING
:
536 info
->data_buff
+ info
->data_buff_pos
,
537 DIV_ROUND_UP(do_bytes
, 4));
539 if (info
->oob_size
> 0)
541 info
->oob_buff
+ info
->oob_buff_pos
,
542 DIV_ROUND_UP(info
->oob_size
, 4));
545 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
550 /* Update buffer pointers for multi-page read/write */
551 info
->data_buff_pos
+= do_bytes
;
552 info
->oob_buff_pos
+= info
->oob_size
;
553 info
->data_size
-= do_bytes
;
557 static void start_data_dma(struct pxa3xx_nand_info
*info
)
559 struct pxa_dma_desc
*desc
= info
->data_desc
;
560 int dma_len
= ALIGN(info
->data_size
+ info
->oob_size
, 32);
562 desc
->ddadr
= DDADR_STOP
;
563 desc
->dcmd
= DCMD_ENDIRQEN
| DCMD_WIDTH4
| DCMD_BURST32
| dma_len
;
565 switch (info
->state
) {
566 case STATE_DMA_WRITING
:
567 desc
->dsadr
= info
->data_buff_phys
;
568 desc
->dtadr
= info
->mmio_phys
+ NDDB
;
569 desc
->dcmd
|= DCMD_INCSRCADDR
| DCMD_FLOWTRG
;
571 case STATE_DMA_READING
:
572 desc
->dtadr
= info
->data_buff_phys
;
573 desc
->dsadr
= info
->mmio_phys
+ NDDB
;
574 desc
->dcmd
|= DCMD_INCTRGADDR
| DCMD_FLOWSRC
;
577 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
582 DRCMR(info
->drcmr_dat
) = DRCMR_MAPVLD
| info
->data_dma_ch
;
583 DDADR(info
->data_dma_ch
) = info
->data_desc_addr
;
584 DCSR(info
->data_dma_ch
) |= DCSR_RUN
;
587 static void pxa3xx_nand_data_dma_irq(int channel
, void *data
)
589 struct pxa3xx_nand_info
*info
= data
;
592 dcsr
= DCSR(channel
);
593 DCSR(channel
) = dcsr
;
595 if (dcsr
& DCSR_BUSERR
) {
596 info
->retcode
= ERR_DMABUSERR
;
599 info
->state
= STATE_DMA_DONE
;
600 enable_int(info
, NDCR_INT_MASK
);
601 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
/* Stub for builds without PXA DMA support; transfers fall back to PIO. */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
}
608 static irqreturn_t
pxa3xx_nand_irq_thread(int irq
, void *data
)
610 struct pxa3xx_nand_info
*info
= data
;
612 handle_data_pio(info
);
614 info
->state
= STATE_CMD_DONE
;
615 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
620 static irqreturn_t
pxa3xx_nand_irq(int irq
, void *devid
)
622 struct pxa3xx_nand_info
*info
= devid
;
623 unsigned int status
, is_completed
= 0, is_ready
= 0;
624 unsigned int ready
, cmd_done
;
625 irqreturn_t ret
= IRQ_HANDLED
;
628 ready
= NDSR_FLASH_RDY
;
629 cmd_done
= NDSR_CS0_CMDD
;
632 cmd_done
= NDSR_CS1_CMDD
;
635 status
= nand_readl(info
, NDSR
);
637 if (status
& NDSR_UNCORERR
)
638 info
->retcode
= ERR_UNCORERR
;
639 if (status
& NDSR_CORERR
) {
640 info
->retcode
= ERR_CORERR
;
641 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
&&
643 info
->ecc_err_cnt
= NDSR_ERR_CNT(status
);
645 info
->ecc_err_cnt
= 1;
648 * Each chunk composing a page is corrected independently,
649 * and we need to store maximum number of corrected bitflips
650 * to return it to the MTD layer in ecc.read_page().
652 info
->max_bitflips
= max_t(unsigned int,
656 if (status
& (NDSR_RDDREQ
| NDSR_WRDREQ
)) {
657 /* whether use dma to transfer data */
659 disable_int(info
, NDCR_INT_MASK
);
660 info
->state
= (status
& NDSR_RDDREQ
) ?
661 STATE_DMA_READING
: STATE_DMA_WRITING
;
662 start_data_dma(info
);
663 goto NORMAL_IRQ_EXIT
;
665 info
->state
= (status
& NDSR_RDDREQ
) ?
666 STATE_PIO_READING
: STATE_PIO_WRITING
;
667 ret
= IRQ_WAKE_THREAD
;
668 goto NORMAL_IRQ_EXIT
;
671 if (status
& cmd_done
) {
672 info
->state
= STATE_CMD_DONE
;
675 if (status
& ready
) {
676 info
->state
= STATE_READY
;
680 if (status
& NDSR_WRCMDREQ
) {
681 nand_writel(info
, NDSR
, NDSR_WRCMDREQ
);
682 status
&= ~NDSR_WRCMDREQ
;
683 info
->state
= STATE_CMD_HANDLE
;
686 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
687 * must be loaded by writing directly either 12 or 16
688 * bytes directly to NDCB0, four bytes at a time.
690 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
691 * but each NDCBx register can be read.
693 nand_writel(info
, NDCB0
, info
->ndcb0
);
694 nand_writel(info
, NDCB0
, info
->ndcb1
);
695 nand_writel(info
, NDCB0
, info
->ndcb2
);
697 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
698 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
699 nand_writel(info
, NDCB0
, info
->ndcb3
);
702 /* clear NDSR to let the controller exit the IRQ */
703 nand_writel(info
, NDSR
, status
);
705 complete(&info
->cmd_complete
);
707 complete(&info
->dev_ready
);
/*
 * Return 1 if every byte in the buffer is 0xFF (erased-flash content),
 * 0 otherwise.  A zero-length buffer counts as blank.  Used to tell a
 * genuinely blank page from a real uncorrectable ECC error, since the
 * controller computes an ECC of 0 for an all-0xFF page.
 *
 * NOTE(review): the extracted source showed this loop with no body;
 * the blank check below is restored per the all-0xff comment used at
 * the ERR_UNCORERR handling site.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;

	return 1;
}
720 static void set_command_address(struct pxa3xx_nand_info
*info
,
721 unsigned int page_size
, uint16_t column
, int page_addr
)
723 /* small page addr setting */
724 if (page_size
< PAGE_CHUNK_SIZE
) {
725 info
->ndcb1
= ((page_addr
& 0xFFFFFF) << 8)
730 info
->ndcb1
= ((page_addr
& 0xFFFF) << 16)
733 if (page_addr
& 0xFF0000)
734 info
->ndcb2
= (page_addr
& 0xFF0000) >> 16;
740 static void prepare_start_command(struct pxa3xx_nand_info
*info
, int command
)
742 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
743 struct mtd_info
*mtd
= host
->mtd
;
745 /* reset data and oob column point to handle data */
749 info
->data_buff_pos
= 0;
750 info
->oob_buff_pos
= 0;
753 info
->retcode
= ERR_NONE
;
754 info
->ecc_err_cnt
= 0;
760 case NAND_CMD_PAGEPROG
:
762 case NAND_CMD_READOOB
:
763 pxa3xx_set_datasize(info
, mtd
);
775 * If we are about to issue a read command, or about to set
776 * the write address, then clean the data buffer.
778 if (command
== NAND_CMD_READ0
||
779 command
== NAND_CMD_READOOB
||
780 command
== NAND_CMD_SEQIN
) {
782 info
->buf_count
= mtd
->writesize
+ mtd
->oobsize
;
783 memset(info
->data_buff
, 0xFF, info
->buf_count
);
788 static int prepare_set_command(struct pxa3xx_nand_info
*info
, int command
,
789 int ext_cmd_type
, uint16_t column
, int page_addr
)
791 int addr_cycle
, exec_cmd
;
792 struct pxa3xx_nand_host
*host
;
793 struct mtd_info
*mtd
;
795 host
= info
->host
[info
->cs
];
801 info
->ndcb0
= NDCB0_CSEL
;
805 if (command
== NAND_CMD_SEQIN
)
808 addr_cycle
= NDCB0_ADDR_CYC(host
->row_addr_cycles
809 + host
->col_addr_cycles
);
812 case NAND_CMD_READOOB
:
814 info
->buf_start
= column
;
815 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
819 if (command
== NAND_CMD_READOOB
)
820 info
->buf_start
+= mtd
->writesize
;
823 * Multiple page read needs an 'extended command type' field,
824 * which is either naked-read or last-read according to the
827 if (mtd
->writesize
== PAGE_CHUNK_SIZE
) {
828 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8);
829 } else if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
830 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8)
832 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
833 info
->ndcb3
= info
->chunk_size
+
837 set_command_address(info
, mtd
->writesize
, column
, page_addr
);
842 info
->buf_start
= column
;
843 set_command_address(info
, mtd
->writesize
, 0, page_addr
);
846 * Multiple page programming needs to execute the initial
847 * SEQIN command that sets the page address.
849 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
850 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
851 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
854 /* No data transfer in this case */
860 case NAND_CMD_PAGEPROG
:
861 if (is_buf_blank(info
->data_buff
,
862 (mtd
->writesize
+ mtd
->oobsize
))) {
867 /* Second command setting for large pages */
868 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
870 * Multiple page write uses the 'extended command'
871 * field. This can be used to issue a command dispatch
872 * or a naked-write depending on the current stage.
874 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
876 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
877 info
->ndcb3
= info
->chunk_size
+
881 * This is the command dispatch that completes a chunked
882 * page program operation.
884 if (info
->data_size
== 0) {
885 info
->ndcb0
= NDCB0_CMD_TYPE(0x1)
886 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
893 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
897 | (NAND_CMD_PAGEPROG
<< 8)
904 info
->buf_count
= 256;
905 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
909 info
->ndcb1
= (column
& 0xFF);
911 info
->data_size
= 256;
914 case NAND_CMD_READID
:
915 info
->buf_count
= host
->read_id_bytes
;
916 info
->ndcb0
|= NDCB0_CMD_TYPE(3)
919 info
->ndcb1
= (column
& 0xFF);
923 case NAND_CMD_STATUS
:
925 info
->ndcb0
|= NDCB0_CMD_TYPE(4)
932 case NAND_CMD_ERASE1
:
933 info
->ndcb0
|= NDCB0_CMD_TYPE(2)
937 | (NAND_CMD_ERASE2
<< 8)
939 info
->ndcb1
= page_addr
;
944 info
->ndcb0
|= NDCB0_CMD_TYPE(5)
949 case NAND_CMD_ERASE2
:
955 dev_err(&info
->pdev
->dev
, "non-supported command %x\n",
963 static void nand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
964 int column
, int page_addr
)
966 struct pxa3xx_nand_host
*host
= mtd
->priv
;
967 struct pxa3xx_nand_info
*info
= host
->info_data
;
971 * if this is a x16 device ,then convert the input
972 * "byte" address into a "word" address appropriate
973 * for indexing a word-oriented device
975 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
979 * There may be different NAND chip hooked to
980 * different chip select, so check whether
981 * chip select has been changed, if yes, reset the timing
983 if (info
->cs
!= host
->cs
) {
985 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
986 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
989 prepare_start_command(info
, command
);
991 info
->state
= STATE_PREPARED
;
992 exec_cmd
= prepare_set_command(info
, command
, 0, column
, page_addr
);
995 init_completion(&info
->cmd_complete
);
996 init_completion(&info
->dev_ready
);
998 pxa3xx_nand_start(info
);
1000 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1001 CHIP_DELAY_TIMEOUT
)) {
1002 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1003 /* Stop State Machine for next command cycle */
1004 pxa3xx_nand_stop(info
);
1007 info
->state
= STATE_IDLE
;
1010 static void nand_cmdfunc_extended(struct mtd_info
*mtd
,
1011 const unsigned command
,
1012 int column
, int page_addr
)
1014 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1015 struct pxa3xx_nand_info
*info
= host
->info_data
;
1016 int exec_cmd
, ext_cmd_type
;
1019 * if this is a x16 device then convert the input
1020 * "byte" address into a "word" address appropriate
1021 * for indexing a word-oriented device
1023 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1027 * There may be different NAND chip hooked to
1028 * different chip select, so check whether
1029 * chip select has been changed, if yes, reset the timing
1031 if (info
->cs
!= host
->cs
) {
1032 info
->cs
= host
->cs
;
1033 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
1034 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
1037 /* Select the extended command for the first command */
1039 case NAND_CMD_READ0
:
1040 case NAND_CMD_READOOB
:
1041 ext_cmd_type
= EXT_CMD_TYPE_MONO
;
1043 case NAND_CMD_SEQIN
:
1044 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1046 case NAND_CMD_PAGEPROG
:
1047 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1054 prepare_start_command(info
, command
);
1057 * Prepare the "is ready" completion before starting a command
1058 * transaction sequence. If the command is not executed the
1059 * completion will be completed, see below.
1061 * We can do that inside the loop because the command variable
1062 * is invariant and thus so is the exec_cmd.
1064 info
->need_wait
= 1;
1065 init_completion(&info
->dev_ready
);
1067 info
->state
= STATE_PREPARED
;
1068 exec_cmd
= prepare_set_command(info
, command
, ext_cmd_type
,
1071 info
->need_wait
= 0;
1072 complete(&info
->dev_ready
);
1076 init_completion(&info
->cmd_complete
);
1077 pxa3xx_nand_start(info
);
1079 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1080 CHIP_DELAY_TIMEOUT
)) {
1081 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1082 /* Stop State Machine for next command cycle */
1083 pxa3xx_nand_stop(info
);
1087 /* Check if the sequence is complete */
1088 if (info
->data_size
== 0 && command
!= NAND_CMD_PAGEPROG
)
1092 * After a splitted program command sequence has issued
1093 * the command dispatch, the command sequence is complete.
1095 if (info
->data_size
== 0 &&
1096 command
== NAND_CMD_PAGEPROG
&&
1097 ext_cmd_type
== EXT_CMD_TYPE_DISPATCH
)
1100 if (command
== NAND_CMD_READ0
|| command
== NAND_CMD_READOOB
) {
1101 /* Last read: issue a 'last naked read' */
1102 if (info
->data_size
== info
->chunk_size
)
1103 ext_cmd_type
= EXT_CMD_TYPE_LAST_RW
;
1105 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1108 * If a splitted program command has no more data to transfer,
1109 * the command dispatch must be issued to complete.
1111 } else if (command
== NAND_CMD_PAGEPROG
&&
1112 info
->data_size
== 0) {
1113 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1117 info
->state
= STATE_IDLE
;
1120 static int pxa3xx_nand_write_page_hwecc(struct mtd_info
*mtd
,
1121 struct nand_chip
*chip
, const uint8_t *buf
, int oob_required
)
1123 chip
->write_buf(mtd
, buf
, mtd
->writesize
);
1124 chip
->write_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1129 static int pxa3xx_nand_read_page_hwecc(struct mtd_info
*mtd
,
1130 struct nand_chip
*chip
, uint8_t *buf
, int oob_required
,
1133 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1134 struct pxa3xx_nand_info
*info
= host
->info_data
;
1136 chip
->read_buf(mtd
, buf
, mtd
->writesize
);
1137 chip
->read_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1139 if (info
->retcode
== ERR_CORERR
&& info
->use_ecc
) {
1140 mtd
->ecc_stats
.corrected
+= info
->ecc_err_cnt
;
1142 } else if (info
->retcode
== ERR_UNCORERR
) {
1144 * for blank page (all 0xff), HW will calculate its ECC as
1145 * 0, which is different from the ECC information within
1146 * OOB, ignore such uncorrectable errors
1148 if (is_buf_blank(buf
, mtd
->writesize
))
1149 info
->retcode
= ERR_NONE
;
1151 mtd
->ecc_stats
.failed
++;
1154 return info
->max_bitflips
;
1157 static uint8_t pxa3xx_nand_read_byte(struct mtd_info
*mtd
)
1159 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1160 struct pxa3xx_nand_info
*info
= host
->info_data
;
1163 if (info
->buf_start
< info
->buf_count
)
1164 /* Has just send a new command? */
1165 retval
= info
->data_buff
[info
->buf_start
++];
1170 static u16
pxa3xx_nand_read_word(struct mtd_info
*mtd
)
1172 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1173 struct pxa3xx_nand_info
*info
= host
->info_data
;
1174 u16 retval
= 0xFFFF;
1176 if (!(info
->buf_start
& 0x01) && info
->buf_start
< info
->buf_count
) {
1177 retval
= *((u16
*)(info
->data_buff
+info
->buf_start
));
1178 info
->buf_start
+= 2;
1183 static void pxa3xx_nand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1185 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1186 struct pxa3xx_nand_info
*info
= host
->info_data
;
1187 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1189 memcpy(buf
, info
->data_buff
+ info
->buf_start
, real_len
);
1190 info
->buf_start
+= real_len
;
1193 static void pxa3xx_nand_write_buf(struct mtd_info
*mtd
,
1194 const uint8_t *buf
, int len
)
1196 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1197 struct pxa3xx_nand_info
*info
= host
->info_data
;
1198 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1200 memcpy(info
->data_buff
+ info
->buf_start
, buf
, real_len
);
1201 info
->buf_start
+= real_len
;
/*
 * mtd select_chip hook: intentionally empty.  Chip selection is handled
 * per-command via info->cs in the cmdfunc paths, which reprogram the
 * CS0 timing registers when the chip select changes.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1209 static int pxa3xx_nand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1211 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1212 struct pxa3xx_nand_info
*info
= host
->info_data
;
1214 if (info
->need_wait
) {
1215 info
->need_wait
= 0;
1216 if (!wait_for_completion_timeout(&info
->dev_ready
,
1217 CHIP_DELAY_TIMEOUT
)) {
1218 dev_err(&info
->pdev
->dev
, "Ready time out!!!\n");
1219 return NAND_STATUS_FAIL
;
1223 /* pxa3xx_nand_send_command has waited for command complete */
1224 if (this->state
== FL_WRITING
|| this->state
== FL_ERASING
) {
1225 if (info
->retcode
== ERR_NONE
)
1228 return NAND_STATUS_FAIL
;
1231 return NAND_STATUS_READY
;
1234 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info
*info
,
1235 const struct pxa3xx_nand_flash
*f
)
1237 struct platform_device
*pdev
= info
->pdev
;
1238 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1239 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
1240 uint32_t ndcr
= 0x0; /* enable all interrupts */
1242 if (f
->page_size
!= 2048 && f
->page_size
!= 512) {
1243 dev_err(&pdev
->dev
, "Current only support 2048 and 512 size\n");
1247 if (f
->flash_width
!= 16 && f
->flash_width
!= 8) {
1248 dev_err(&pdev
->dev
, "Only support 8bit and 16 bit!\n");
1252 /* calculate flash information */
1253 host
->read_id_bytes
= (f
->page_size
== 2048) ? 4 : 2;
1255 /* calculate addressing information */
1256 host
->col_addr_cycles
= (f
->page_size
== 2048) ? 2 : 1;
1258 if (f
->num_blocks
* f
->page_per_block
> 65536)
1259 host
->row_addr_cycles
= 3;
1261 host
->row_addr_cycles
= 2;
1263 ndcr
|= (pdata
->enable_arbiter
) ? NDCR_ND_ARB_EN
: 0;
1264 ndcr
|= (host
->col_addr_cycles
== 2) ? NDCR_RA_START
: 0;
1265 ndcr
|= (f
->page_per_block
== 64) ? NDCR_PG_PER_BLK
: 0;
1266 ndcr
|= (f
->page_size
== 2048) ? NDCR_PAGE_SZ
: 0;
1267 ndcr
|= (f
->flash_width
== 16) ? NDCR_DWIDTH_M
: 0;
1268 ndcr
|= (f
->dfc_width
== 16) ? NDCR_DWIDTH_C
: 0;
1270 ndcr
|= NDCR_RD_ID_CNT(host
->read_id_bytes
);
1271 ndcr
|= NDCR_SPARE_EN
; /* enable spare by default */
1273 info
->reg_ndcr
= ndcr
;
1275 pxa3xx_nand_set_timing(host
, f
->timing
);
1279 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info
*info
)
1282 * We set 0 by hard coding here, for we don't support keep_config
1283 * when there is more than one chip attached to the controller
1285 struct pxa3xx_nand_host
*host
= info
->host
[0];
1286 uint32_t ndcr
= nand_readl(info
, NDCR
);
1288 if (ndcr
& NDCR_PAGE_SZ
) {
1289 /* Controller's FIFO size */
1290 info
->chunk_size
= 2048;
1291 host
->read_id_bytes
= 4;
1293 info
->chunk_size
= 512;
1294 host
->read_id_bytes
= 2;
1297 /* Set an initial chunk size */
1298 info
->reg_ndcr
= ndcr
& ~NDCR_INT_MASK
;
1299 info
->ndtr0cs0
= nand_readl(info
, NDTR0CS0
);
1300 info
->ndtr1cs0
= nand_readl(info
, NDTR1CS0
);
1305 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info
*info
)
1307 struct platform_device
*pdev
= info
->pdev
;
1308 int data_desc_offset
= info
->buf_size
- sizeof(struct pxa_dma_desc
);
1311 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1312 if (info
->data_buff
== NULL
)
1317 info
->data_buff
= dma_alloc_coherent(&pdev
->dev
, info
->buf_size
,
1318 &info
->data_buff_phys
, GFP_KERNEL
);
1319 if (info
->data_buff
== NULL
) {
1320 dev_err(&pdev
->dev
, "failed to allocate dma buffer\n");
1324 info
->data_desc
= (void *)info
->data_buff
+ data_desc_offset
;
1325 info
->data_desc_addr
= info
->data_buff_phys
+ data_desc_offset
;
1327 info
->data_dma_ch
= pxa_request_dma("nand-data", DMA_PRIO_LOW
,
1328 pxa3xx_nand_data_dma_irq
, info
);
1329 if (info
->data_dma_ch
< 0) {
1330 dev_err(&pdev
->dev
, "failed to request data dma\n");
1331 dma_free_coherent(&pdev
->dev
, info
->buf_size
,
1332 info
->data_buff
, info
->data_buff_phys
);
1333 return info
->data_dma_ch
;
1337 * Now that DMA buffers are allocated we turn on
1338 * DMA proper for I/O operations.
1344 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info
*info
)
1346 struct platform_device
*pdev
= info
->pdev
;
1347 if (info
->use_dma
) {
1348 pxa_free_dma(info
->data_dma_ch
);
1349 dma_free_coherent(&pdev
->dev
, info
->buf_size
,
1350 info
->data_buff
, info
->data_buff_phys
);
1352 kfree(info
->data_buff
);
1356 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info
*info
)
1358 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1359 if (info
->data_buff
== NULL
)
1364 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info
*info
)
1366 kfree(info
->data_buff
);
1370 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info
*info
)
1372 struct mtd_info
*mtd
;
1373 struct nand_chip
*chip
;
1376 mtd
= info
->host
[info
->cs
]->mtd
;
1379 /* use the common timing to make a try */
1380 ret
= pxa3xx_nand_config_flash(info
, &builtin_flash_types
[0]);
1384 chip
->cmdfunc(mtd
, NAND_CMD_RESET
, 0, 0);
1385 ret
= chip
->waitfunc(mtd
, chip
);
1386 if (ret
& NAND_STATUS_FAIL
)
1392 static int pxa_ecc_init(struct pxa3xx_nand_info
*info
,
1393 struct nand_ecc_ctrl
*ecc
,
1394 int strength
, int ecc_stepsize
, int page_size
)
1396 if (strength
== 1 && ecc_stepsize
== 512 && page_size
== 2048) {
1397 info
->chunk_size
= 2048;
1398 info
->spare_size
= 40;
1399 info
->ecc_size
= 24;
1400 ecc
->mode
= NAND_ECC_HW
;
1404 } else if (strength
== 1 && ecc_stepsize
== 512 && page_size
== 512) {
1405 info
->chunk_size
= 512;
1406 info
->spare_size
= 8;
1408 ecc
->mode
= NAND_ECC_HW
;
1413 * Required ECC: 4-bit correction per 512 bytes
1414 * Select: 16-bit correction per 2048 bytes
1416 } else if (strength
== 4 && ecc_stepsize
== 512 && page_size
== 2048) {
1418 info
->chunk_size
= 2048;
1419 info
->spare_size
= 32;
1420 info
->ecc_size
= 32;
1421 ecc
->mode
= NAND_ECC_HW
;
1422 ecc
->size
= info
->chunk_size
;
1423 ecc
->layout
= &ecc_layout_2KB_bch4bit
;
1426 } else if (strength
== 4 && ecc_stepsize
== 512 && page_size
== 4096) {
1428 info
->chunk_size
= 2048;
1429 info
->spare_size
= 32;
1430 info
->ecc_size
= 32;
1431 ecc
->mode
= NAND_ECC_HW
;
1432 ecc
->size
= info
->chunk_size
;
1433 ecc
->layout
= &ecc_layout_4KB_bch4bit
;
1437 * Required ECC: 8-bit correction per 512 bytes
1438 * Select: 16-bit correction per 1024 bytes
1440 } else if (strength
== 8 && ecc_stepsize
== 512 && page_size
== 4096) {
1442 info
->chunk_size
= 1024;
1443 info
->spare_size
= 0;
1444 info
->ecc_size
= 32;
1445 ecc
->mode
= NAND_ECC_HW
;
1446 ecc
->size
= info
->chunk_size
;
1447 ecc
->layout
= &ecc_layout_4KB_bch8bit
;
1450 dev_err(&info
->pdev
->dev
,
1451 "ECC strength %d at page size %d is not supported\n",
1452 strength
, page_size
);
1456 dev_info(&info
->pdev
->dev
, "ECC strength %d, ECC step size %d\n",
1457 ecc
->strength
, ecc
->size
);
1461 static int pxa3xx_nand_scan(struct mtd_info
*mtd
)
1463 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1464 struct pxa3xx_nand_info
*info
= host
->info_data
;
1465 struct platform_device
*pdev
= info
->pdev
;
1466 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1467 struct nand_flash_dev pxa3xx_flash_ids
[2], *def
= NULL
;
1468 const struct pxa3xx_nand_flash
*f
= NULL
;
1469 struct nand_chip
*chip
= mtd
->priv
;
1473 uint16_t ecc_strength
, ecc_step
;
1475 if (pdata
->keep_config
&& !pxa3xx_nand_detect_config(info
))
1478 /* Set a default chunk size */
1479 info
->chunk_size
= 512;
1481 ret
= pxa3xx_nand_sensing(info
);
1483 dev_info(&info
->pdev
->dev
, "There is no chip on cs %d!\n",
1489 chip
->cmdfunc(mtd
, NAND_CMD_READID
, 0, 0);
1490 id
= *((uint16_t *)(info
->data_buff
));
1492 dev_info(&info
->pdev
->dev
, "Detect a flash id %x\n", id
);
1494 dev_warn(&info
->pdev
->dev
,
1495 "Read out ID 0, potential timing set wrong!!\n");
1500 num
= ARRAY_SIZE(builtin_flash_types
) + pdata
->num_flash
- 1;
1501 for (i
= 0; i
< num
; i
++) {
1502 if (i
< pdata
->num_flash
)
1503 f
= pdata
->flash
+ i
;
1505 f
= &builtin_flash_types
[i
- pdata
->num_flash
+ 1];
1507 /* find the chip in default list */
1508 if (f
->chip_id
== id
)
1512 if (i
>= (ARRAY_SIZE(builtin_flash_types
) + pdata
->num_flash
- 1)) {
1513 dev_err(&info
->pdev
->dev
, "ERROR!! flash not defined!!!\n");
1518 ret
= pxa3xx_nand_config_flash(info
, f
);
1520 dev_err(&info
->pdev
->dev
, "ERROR! Configure failed\n");
1524 memset(pxa3xx_flash_ids
, 0, sizeof(pxa3xx_flash_ids
));
1526 pxa3xx_flash_ids
[0].name
= f
->name
;
1527 pxa3xx_flash_ids
[0].dev_id
= (f
->chip_id
>> 8) & 0xffff;
1528 pxa3xx_flash_ids
[0].pagesize
= f
->page_size
;
1529 chipsize
= (uint64_t)f
->num_blocks
* f
->page_per_block
* f
->page_size
;
1530 pxa3xx_flash_ids
[0].chipsize
= chipsize
>> 20;
1531 pxa3xx_flash_ids
[0].erasesize
= f
->page_size
* f
->page_per_block
;
1532 if (f
->flash_width
== 16)
1533 pxa3xx_flash_ids
[0].options
= NAND_BUSWIDTH_16
;
1534 pxa3xx_flash_ids
[1].name
= NULL
;
1535 def
= pxa3xx_flash_ids
;
1537 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1538 chip
->options
|= NAND_BUSWIDTH_16
;
1540 /* Device detection must be done with ECC disabled */
1541 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
1542 nand_writel(info
, NDECCCTRL
, 0x0);
1544 if (nand_scan_ident(mtd
, 1, def
))
1547 if (pdata
->flash_bbt
) {
1549 * We'll use a bad block table stored in-flash and don't
1550 * allow writing the bad block marker to the flash.
1552 chip
->bbt_options
|= NAND_BBT_USE_FLASH
|
1553 NAND_BBT_NO_OOB_BBM
;
1554 chip
->bbt_td
= &bbt_main_descr
;
1555 chip
->bbt_md
= &bbt_mirror_descr
;
1559 * If the page size is bigger than the FIFO size, let's check
1560 * we are given the right variant and then switch to the extended
1561 * (aka splitted) command handling,
1563 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1564 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
) {
1565 chip
->cmdfunc
= nand_cmdfunc_extended
;
1567 dev_err(&info
->pdev
->dev
,
1568 "unsupported page size on this variant\n");
1573 if (pdata
->ecc_strength
&& pdata
->ecc_step_size
) {
1574 ecc_strength
= pdata
->ecc_strength
;
1575 ecc_step
= pdata
->ecc_step_size
;
1577 ecc_strength
= chip
->ecc_strength_ds
;
1578 ecc_step
= chip
->ecc_step_ds
;
1581 /* Set default ECC strength requirements on non-ONFI devices */
1582 if (ecc_strength
< 1 && ecc_step
< 1) {
1587 ret
= pxa_ecc_init(info
, &chip
->ecc
, ecc_strength
,
1588 ecc_step
, mtd
->writesize
);
1592 /* calculate addressing information */
1593 if (mtd
->writesize
>= 2048)
1594 host
->col_addr_cycles
= 2;
1596 host
->col_addr_cycles
= 1;
1598 /* release the initial buffer */
1599 kfree(info
->data_buff
);
1601 /* allocate the real data + oob buffer */
1602 info
->buf_size
= mtd
->writesize
+ mtd
->oobsize
;
1603 ret
= pxa3xx_nand_init_buff(info
);
1606 info
->oob_buff
= info
->data_buff
+ mtd
->writesize
;
1608 if ((mtd
->size
>> chip
->page_shift
) > 65536)
1609 host
->row_addr_cycles
= 3;
1611 host
->row_addr_cycles
= 2;
1612 return nand_scan_tail(mtd
);
1615 static int alloc_nand_resource(struct platform_device
*pdev
)
1617 struct pxa3xx_nand_platform_data
*pdata
;
1618 struct pxa3xx_nand_info
*info
;
1619 struct pxa3xx_nand_host
*host
;
1620 struct nand_chip
*chip
= NULL
;
1621 struct mtd_info
*mtd
;
1625 pdata
= dev_get_platdata(&pdev
->dev
);
1626 if (pdata
->num_cs
<= 0)
1628 info
= devm_kzalloc(&pdev
->dev
, sizeof(*info
) + (sizeof(*mtd
) +
1629 sizeof(*host
)) * pdata
->num_cs
, GFP_KERNEL
);
1634 info
->variant
= pxa3xx_nand_get_variant(pdev
);
1635 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1636 mtd
= (struct mtd_info
*)((unsigned int)&info
[1] +
1637 (sizeof(*mtd
) + sizeof(*host
)) * cs
);
1638 chip
= (struct nand_chip
*)(&mtd
[1]);
1639 host
= (struct pxa3xx_nand_host
*)chip
;
1640 info
->host
[cs
] = host
;
1643 host
->info_data
= info
;
1645 mtd
->owner
= THIS_MODULE
;
1647 chip
->ecc
.read_page
= pxa3xx_nand_read_page_hwecc
;
1648 chip
->ecc
.write_page
= pxa3xx_nand_write_page_hwecc
;
1649 chip
->controller
= &info
->controller
;
1650 chip
->waitfunc
= pxa3xx_nand_waitfunc
;
1651 chip
->select_chip
= pxa3xx_nand_select_chip
;
1652 chip
->read_word
= pxa3xx_nand_read_word
;
1653 chip
->read_byte
= pxa3xx_nand_read_byte
;
1654 chip
->read_buf
= pxa3xx_nand_read_buf
;
1655 chip
->write_buf
= pxa3xx_nand_write_buf
;
1656 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
1657 chip
->cmdfunc
= nand_cmdfunc
;
1660 spin_lock_init(&chip
->controller
->lock
);
1661 init_waitqueue_head(&chip
->controller
->wq
);
1662 info
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
1663 if (IS_ERR(info
->clk
)) {
1664 dev_err(&pdev
->dev
, "failed to get nand clock\n");
1665 return PTR_ERR(info
->clk
);
1667 ret
= clk_prepare_enable(info
->clk
);
1673 * This is a dirty hack to make this driver work from
1674 * devicetree bindings. It can be removed once we have
1675 * a prober DMA controller framework for DT.
1677 if (pdev
->dev
.of_node
&&
1678 of_machine_is_compatible("marvell,pxa3xx")) {
1679 info
->drcmr_dat
= 97;
1680 info
->drcmr_cmd
= 99;
1682 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 0);
1685 "no resource defined for data DMA\n");
1687 goto fail_disable_clk
;
1689 info
->drcmr_dat
= r
->start
;
1691 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 1);
1694 "no resource defined for cmd DMA\n");
1696 goto fail_disable_clk
;
1698 info
->drcmr_cmd
= r
->start
;
1702 irq
= platform_get_irq(pdev
, 0);
1704 dev_err(&pdev
->dev
, "no IRQ resource defined\n");
1706 goto fail_disable_clk
;
1709 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1710 info
->mmio_base
= devm_ioremap_resource(&pdev
->dev
, r
);
1711 if (IS_ERR(info
->mmio_base
)) {
1712 ret
= PTR_ERR(info
->mmio_base
);
1713 goto fail_disable_clk
;
1715 info
->mmio_phys
= r
->start
;
1717 /* Allocate a buffer to allow flash detection */
1718 info
->buf_size
= INIT_BUFFER_SIZE
;
1719 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1720 if (info
->data_buff
== NULL
) {
1722 goto fail_disable_clk
;
1725 /* initialize all interrupts to be disabled */
1726 disable_int(info
, NDSR_MASK
);
1728 ret
= request_threaded_irq(irq
, pxa3xx_nand_irq
,
1729 pxa3xx_nand_irq_thread
, IRQF_ONESHOT
,
1732 dev_err(&pdev
->dev
, "failed to request IRQ\n");
1736 platform_set_drvdata(pdev
, info
);
1741 free_irq(irq
, info
);
1742 kfree(info
->data_buff
);
1744 clk_disable_unprepare(info
->clk
);
1748 static int pxa3xx_nand_remove(struct platform_device
*pdev
)
1750 struct pxa3xx_nand_info
*info
= platform_get_drvdata(pdev
);
1751 struct pxa3xx_nand_platform_data
*pdata
;
1757 pdata
= dev_get_platdata(&pdev
->dev
);
1759 irq
= platform_get_irq(pdev
, 0);
1761 free_irq(irq
, info
);
1762 pxa3xx_nand_free_buff(info
);
1764 clk_disable_unprepare(info
->clk
);
1766 for (cs
= 0; cs
< pdata
->num_cs
; cs
++)
1767 nand_release(info
->host
[cs
]->mtd
);
1771 static int pxa3xx_nand_probe_dt(struct platform_device
*pdev
)
1773 struct pxa3xx_nand_platform_data
*pdata
;
1774 struct device_node
*np
= pdev
->dev
.of_node
;
1775 const struct of_device_id
*of_id
=
1776 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
1781 pdata
= devm_kzalloc(&pdev
->dev
, sizeof(*pdata
), GFP_KERNEL
);
1785 if (of_get_property(np
, "marvell,nand-enable-arbiter", NULL
))
1786 pdata
->enable_arbiter
= 1;
1787 if (of_get_property(np
, "marvell,nand-keep-config", NULL
))
1788 pdata
->keep_config
= 1;
1789 of_property_read_u32(np
, "num-cs", &pdata
->num_cs
);
1790 pdata
->flash_bbt
= of_get_nand_on_flash_bbt(np
);
1792 pdata
->ecc_strength
= of_get_nand_ecc_strength(np
);
1793 if (pdata
->ecc_strength
< 0)
1794 pdata
->ecc_strength
= 0;
1796 pdata
->ecc_step_size
= of_get_nand_ecc_step_size(np
);
1797 if (pdata
->ecc_step_size
< 0)
1798 pdata
->ecc_step_size
= 0;
1800 pdev
->dev
.platform_data
= pdata
;
1805 static int pxa3xx_nand_probe(struct platform_device
*pdev
)
1807 struct pxa3xx_nand_platform_data
*pdata
;
1808 struct mtd_part_parser_data ppdata
= {};
1809 struct pxa3xx_nand_info
*info
;
1810 int ret
, cs
, probe_success
;
1812 #ifndef ARCH_HAS_DMA
1815 dev_warn(&pdev
->dev
,
1816 "This platform can't do DMA on this device\n");
1819 ret
= pxa3xx_nand_probe_dt(pdev
);
1823 pdata
= dev_get_platdata(&pdev
->dev
);
1825 dev_err(&pdev
->dev
, "no platform data defined\n");
1829 ret
= alloc_nand_resource(pdev
);
1831 dev_err(&pdev
->dev
, "alloc nand resource failed\n");
1835 info
= platform_get_drvdata(pdev
);
1837 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1838 struct mtd_info
*mtd
= info
->host
[cs
]->mtd
;
1841 * The mtd name matches the one used in 'mtdparts' kernel
1842 * parameter. This name cannot be changed or otherwise
1843 * user's mtd partitions configuration would get broken.
1845 mtd
->name
= "pxa3xx_nand-0";
1847 ret
= pxa3xx_nand_scan(mtd
);
1849 dev_warn(&pdev
->dev
, "failed to scan nand at cs %d\n",
1854 ppdata
.of_node
= pdev
->dev
.of_node
;
1855 ret
= mtd_device_parse_register(mtd
, NULL
,
1856 &ppdata
, pdata
->parts
[cs
],
1857 pdata
->nr_parts
[cs
]);
1862 if (!probe_success
) {
1863 pxa3xx_nand_remove(pdev
);
#ifdef CONFIG_PM
/*
 * Legacy platform-bus suspend hook: refuse while a command is in
 * flight, then suspend every per-CS MTD device.
 */
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (info->state) {
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_suspend(mtd);
	}

	return 0;
}

/*
 * Legacy platform-bus resume hook: quiesce the controller's interrupt
 * and status state before resuming the MTD devices.
 */
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to a invalid value,
	 * then the driver would reset the timing according
	 * to current chip select at the beginning of cmdfunc
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging state machine of the driver, clear
	 * all status before resume
	 */
	nand_writel(info, NDSR, NDSR_MASK);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_resume(mtd);
	}

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif
1929 static struct platform_driver pxa3xx_nand_driver
= {
1931 .name
= "pxa3xx-nand",
1932 .of_match_table
= pxa3xx_nand_dt_ids
,
1934 .probe
= pxa3xx_nand_probe
,
1935 .remove
= pxa3xx_nand_remove
,
1936 .suspend
= pxa3xx_nand_suspend
,
1937 .resume
= pxa3xx_nand_resume
,
1940 module_platform_driver(pxa3xx_nand_driver
);
1942 MODULE_LICENSE("GPL");
1943 MODULE_DESCRIPTION("PXA3xx NAND controller driver");