/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
#define CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048
/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7
/* macros for registers read/write */
#define nand_writel(info, off, val)					\
	do {								\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_writel(0x%x, 0x%04x)\n",		\
			 __func__, __LINE__, (val), (off));		\
		writel_relaxed((val), (info)->mmio_base + (off));	\
	} while (0)

#define nand_readl(info, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((info)->mmio_base + (off));		\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_readl(0x%04x) = 0x%x\n",		\
			 __func__, __LINE__, (off), _v);		\
		_v;							\
	})
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	*pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	struct scatterlist	sg;
	enum dma_data_direction	dma_dir;
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	unsigned int		drcmr_dat;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e. chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size are the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};
struct pxa3xx_nand_flash {
	uint32_t	chip_id;
	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int	dfc_width;	/* Width of flash controller (DWIDTH_C) */
	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};
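
/*
 * Each chip_id above packs the first two READID bytes (manufacturer and
 * device ID) as read_byte() | read_byte() << 8, which is the same byte
 * order pxa3xx_nand_init_timings_compat() below uses when matching a
 * detected device against this table.
 */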
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
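
/*
 * Note: the mirror pattern is the main pattern reversed, so the two
 * on-flash copies of the bad block table cannot be mistaken for one
 * another.
 */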
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
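
/*
 * Worked example (the clock value is illustrative, not board-specific):
 * at a 156 MHz NAND clock, clk / 1000000 = 156 cycles per microsecond,
 * so a 25 ns pulse converts to (25 * 156) / 1000 = 3 cycles. The integer
 * division truncates, so the result can come out one cycle short of the
 * requested time. The min() clamps in the NDTR0/NDTR1 macros above then
 * saturate each value to the width of its register field.
 */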
static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	if (!of_id)
		return PXA3XX_NAND_VARIANT_PXA;
	return (enum pxa3xx_nand_variant)of_id->data;
}
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
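
/*
 * Note on the conversion above: nand_sdr_timings values are given in
 * picoseconds, hence the DIV_ROUND_UP(..., 1000) to nanoseconds, and the
 * spec provides whole-cycle times (tWC_min, tRC_min) rather than the pulse
 * widths NDTR0 expects, which is why tWP and tRP are derived as
 * tWC - tWH and tRC - tREH before conversion to clock cycles.
 */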
static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
					   unsigned int *flash_width,
					   unsigned int *dfc_width)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int i, id, ntypes;

	ntypes = ARRAY_SIZE(builtin_flash_types);

	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	id = chip->read_byte(mtd);
	id |= chip->read_byte(mtd) << 0x8;

	for (i = 0; i < ntypes; i++) {
		f = &builtin_flash_types[i];

		if (f->chip_id == id)
			break;
	}

	if (i == ntypes) {
		dev_err(&info->pdev->dev, "Error: timings not found\n");
		return -EINVAL;
	}

	pxa3xx_nand_set_timing(host, f->timing);

	*flash_width = f->flash_width;
	*dfc_width = f->dfc_width;

	return 0;
}
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 unsigned int mode)
{
	const struct nand_sdr_timings *timings;

	mode = fls(mode) - 1;

	timings = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}
static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned int flash_width = 0, dfc_width = 0;
	int mode, err;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
						      &dfc_width);
		if (err)
			return err;

		if (flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		err = pxa3xx_nand_init_timings_onfi(host, mode);
		if (err)
			return err;
	}

	return 0;
}
/*
 * NOTE: ND_RUN must be set first, and only then may the command
 * buffer be written; otherwise it does not work.
 * We enable all the interrupts at the same time, and
 * let pxa3xx_nand_irq handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait for the RUN bit in NDCR to become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
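
/*
 * Note the inverted sense in the two helpers above: the NDCR interrupt
 * bits are mask bits, so clearing a bit in NDCR enables the corresponding
 * interrupt and setting it disables it.
 */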
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO, eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(
				info->mmio_base + NDSR, val,
				val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			data += 32;
			len -= 8;
		}
	}

	ioread32_rep(info->mmio_base + NDDB, data, len);
}
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));

		break;
	case STATE_PIO_READING:
		if (info->step_chunk_size)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));

		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += info->step_chunk_size;
	info->oob_buff_pos += info->step_spare_size;
}
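
/*
 * The data FIFO is accessed one 32-bit word at a time, hence the
 * DIV_ROUND_UP(..., 4) word counts above: a chunk or spare area that is
 * not a multiple of 4 bytes is simply rounded up to the next full word.
 */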
static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}
	info->sg.length = info->chunk_size;
	if (info->use_spare)
		info->sg.length += info->spare_size + info->ecc_size;
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
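
/*
 * PIO draining is deferred to the threaded handler above (woken with
 * IRQ_WAKE_THREAD from pxa3xx_nand_irq() below), so the polling on
 * NDSR.RDDREQ in drain_fifo() does not happen in hard-IRQ context.
 */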
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready    = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready    = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}

	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* decide whether to use DMA to transfer the data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->step_chunk_size	= 0;
	info->step_spare_size	= 0;
	info->cur_chunk		= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * Different NAND chips may be hooked to different chip selects,
	 * so check whether the chip select has changed; if so, reset
	 * the timing.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * Different NAND chips may be hooked to different chip selects,
	 * so check whether the chip select has changed; if so, reset
	 * the timing.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;

		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0    &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
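
/*
 * Note the asymmetry in the loop above: a chunked read completes after
 * ntotalchunks naked reads, while a chunked program takes one extra
 * iteration (ntotalchunks + 1) to issue the final command dispatch.
 */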
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for a blank page (all 0xff), the HW will calculate its
		 * ECC as 0, which differs from the ECC information within
		 * the OOB; ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just sent a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff + info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	const struct nand_sdr_timings *timings;

	/* Configure default flash values */
	info->chunk_size = PAGE_CHUNK_SIZE;
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}
static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}
static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;

	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
		return 0;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}
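
/*
 * Both src_addr and dst_addr in the slave config above point at NDDB
 * because the same channel serves reads and writes; the actual transfer
 * direction is chosen per operation in start_data_dma().
 */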
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
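
/*
 * Example geometry, taken from the 8-bit/4 KB case above: the page is
 * transferred as 4 full chunks of 1024 data bytes (each followed by 32
 * ECC bytes) plus a fifth, spare-only chunk of 64 bytes, i.e.
 * nfullchunks = 4 and ntotalchunks = 5.
 */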
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
	}

	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
static int alloc_nand_resource(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq, cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata->num_cs <= 0)
		return -ENODEV;
	info = devm_kzalloc(&pdev->dev,
			    sizeof(*info) + sizeof(*host) * pdata->num_cs,
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->pdev = pdev;
	info->variant = pxa3xx_nand_get_variant(pdev);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		host = (void *)&info[1] + sizeof(*host) * cs;
		chip = &host->chip;
		nand_set_controller_data(chip, host);
		mtd = nand_to_mtd(chip);
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
		mtd->dev.parent = &pdev->dev;
		/* FIXME: all chips use the same device tree partitions */
		nand_set_flash_node(chip, np);

		nand_set_controller_data(chip, host);
		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
		chip->controller	= &info->controller;
		chip->waitfunc		= pxa3xx_nand_waitfunc;
		chip->select_chip	= pxa3xx_nand_select_chip;
		chip->read_word		= pxa3xx_nand_read_word;
		chip->read_byte		= pxa3xx_nand_read_byte;
		chip->read_buf		= pxa3xx_nand_read_buf;
		chip->write_buf		= pxa3xx_nand_write_buf;
		chip->options		|= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc		= nand_cmdfunc;
	}

	spin_lock_init(&chip->controller->lock);
	init_waitqueue_head(&chip->controller->wq);
	info->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		return PTR_ERR(info->clk);
	}
	ret = clk_prepare_enable(info->clk);
	if (ret < 0)
		return ret;

	if (!np && use_dma) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for data DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
		}
		info->drcmr_dat = r->start;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_disable_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(info->mmio_base)) {
		ret = PTR_ERR(info->mmio_base);
		goto fail_disable_clk;
	}
	info->mmio_phys = r->start;

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
				   pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);

	return 0;

fail_free_buf:
	free_irq(irq, info);
	kfree(info->data_buff);
fail_disable_clk:
	clk_disable_unprepare(info->clk);
	return ret;
}
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND from
	 * accessing the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(nand_to_mtd(&info->host[cs]->chip));

	return 0;
}
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

	pdata->ecc_strength = of_get_nand_ecc_strength(np);
	if (pdata->ecc_strength < 0)
		pdata->ecc_strength = 0;

	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
	if (pdata->ecc_step_size < 0)
		pdata->ecc_step_size = 0;

	pdev->dev.platform_data = pdata;

	return 0;
}
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		ret = mtd_device_register(mtd, pdata->parts[cs],
					  pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}
#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

	if (info->state) {
		dev_err(dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	clk_disable(info->clk);
	return 0;
}

static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(info->clk);
	if (ret < 0)
		return ret;

	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value,
	 * then the driver will reset the timing according
	 * to the current chip select at the beginning of cmdfunc.
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging the state machine of the driver, clear
	 * all status bits before resume.
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");