/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
34 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
38 #include <linux/platform_data/mtd-nand-pxa3xx.h>
40 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
41 #define NAND_STOP_DELAY msecs_to_jiffies(40)
42 #define PAGE_CHUNK_SIZE (2048)
45 * Define a buffer size for the initial command that detects the flash device:
46 * STATUS, READID and PARAM.
47 * ONFI param page is 256 bytes, and there are three redundant copies
48 * to be read. JEDEC param page is 512 bytes, and there are also three
49 * redundant copies to be read.
50 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
52 #define INIT_BUFFER_SIZE 2048
54 /* registers and bit definitions */
55 #define NDCR (0x00) /* Control register */
56 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
57 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
58 #define NDSR (0x14) /* Status Register */
59 #define NDPCR (0x18) /* Page Count Register */
60 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
61 #define NDBDR1 (0x20) /* Bad Block Register 1 */
62 #define NDECCCTRL (0x28) /* ECC control */
63 #define NDDB (0x40) /* Data Buffer */
64 #define NDCB0 (0x48) /* Command Buffer0 */
65 #define NDCB1 (0x4C) /* Command Buffer1 */
66 #define NDCB2 (0x50) /* Command Buffer2 */
68 #define NDCR_SPARE_EN (0x1 << 31)
69 #define NDCR_ECC_EN (0x1 << 30)
70 #define NDCR_DMA_EN (0x1 << 29)
71 #define NDCR_ND_RUN (0x1 << 28)
72 #define NDCR_DWIDTH_C (0x1 << 27)
73 #define NDCR_DWIDTH_M (0x1 << 26)
74 #define NDCR_PAGE_SZ (0x1 << 24)
75 #define NDCR_NCSX (0x1 << 23)
76 #define NDCR_ND_MODE (0x3 << 21)
77 #define NDCR_NAND_MODE (0x0)
78 #define NDCR_CLR_PG_CNT (0x1 << 20)
79 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
80 #define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
81 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
82 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
84 #define NDCR_RA_START (0x1 << 15)
85 #define NDCR_PG_PER_BLK (0x1 << 14)
86 #define NDCR_ND_ARB_EN (0x1 << 12)
87 #define NDCR_INT_MASK (0xFFF)
89 #define NDSR_MASK (0xfff)
90 #define NDSR_ERR_CNT_OFF (16)
91 #define NDSR_ERR_CNT_MASK (0x1f)
92 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
93 #define NDSR_RDY (0x1 << 12)
94 #define NDSR_FLASH_RDY (0x1 << 11)
95 #define NDSR_CS0_PAGED (0x1 << 10)
96 #define NDSR_CS1_PAGED (0x1 << 9)
97 #define NDSR_CS0_CMDD (0x1 << 8)
98 #define NDSR_CS1_CMDD (0x1 << 7)
99 #define NDSR_CS0_BBD (0x1 << 6)
100 #define NDSR_CS1_BBD (0x1 << 5)
101 #define NDSR_UNCORERR (0x1 << 4)
102 #define NDSR_CORERR (0x1 << 3)
103 #define NDSR_WRDREQ (0x1 << 2)
104 #define NDSR_RDDREQ (0x1 << 1)
105 #define NDSR_WRCMDREQ (0x1)
107 #define NDCB0_LEN_OVRD (0x1 << 28)
108 #define NDCB0_ST_ROW_EN (0x1 << 26)
109 #define NDCB0_AUTO_RS (0x1 << 25)
110 #define NDCB0_CSEL (0x1 << 24)
111 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
112 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
113 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
114 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
115 #define NDCB0_NC (0x1 << 20)
116 #define NDCB0_DBC (0x1 << 19)
117 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
118 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
119 #define NDCB0_CMD2_MASK (0xff << 8)
120 #define NDCB0_CMD1_MASK (0xff)
121 #define NDCB0_ADDR_CYC_SHIFT (16)
123 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
124 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
125 #define EXT_CMD_TYPE_READ 4 /* Read */
126 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
127 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
128 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
129 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
132 * This should be large enough to read 'ONFI' and 'JEDEC'.
133 * Let's use 7 bytes, which is the maximum ID count supported
134 * by the controller (see NDCR_RD_ID_CNT_MASK).
136 #define READ_ID_BYTES 7
138 /* macros for registers read/write */
139 #define nand_writel(info, off, val) \
140 writel_relaxed((val), (info)->mmio_base + (off))
142 #define nand_readl(info, off) \
143 readl_relaxed((info)->mmio_base + (off))
145 /* error code and state */
168 enum pxa3xx_nand_variant
{
169 PXA3XX_NAND_VARIANT_PXA
,
170 PXA3XX_NAND_VARIANT_ARMADA370
,
173 struct pxa3xx_nand_host
{
174 struct nand_chip chip
;
175 struct mtd_info
*mtd
;
178 /* page size of attached chip */
182 /* calculated from pxa3xx_nand_flash data */
183 unsigned int col_addr_cycles
;
184 unsigned int row_addr_cycles
;
187 struct pxa3xx_nand_info
{
188 struct nand_hw_control controller
;
189 struct platform_device
*pdev
;
192 void __iomem
*mmio_base
;
193 unsigned long mmio_phys
;
194 struct completion cmd_complete
, dev_ready
;
196 unsigned int buf_start
;
197 unsigned int buf_count
;
198 unsigned int buf_size
;
199 unsigned int data_buff_pos
;
200 unsigned int oob_buff_pos
;
202 /* DMA information */
203 struct scatterlist sg
;
204 enum dma_data_direction dma_dir
;
205 struct dma_chan
*dma_chan
;
206 dma_cookie_t dma_cookie
;
210 unsigned char *data_buff
;
211 unsigned char *oob_buff
;
212 dma_addr_t data_buff_phys
;
215 struct pxa3xx_nand_host
*host
[NUM_CHIP_SELECT
];
219 * This driver supports NFCv1 (as found in PXA SoC)
220 * and NFCv2 (as found in Armada 370/XP SoC).
222 enum pxa3xx_nand_variant variant
;
225 int use_ecc
; /* use HW ECC ? */
226 int ecc_bch
; /* using BCH ECC? */
227 int use_dma
; /* use DMA ? */
228 int use_spare
; /* use spare ? */
231 unsigned int data_size
; /* data to be read from FIFO */
232 unsigned int chunk_size
; /* split commands chunk size */
233 unsigned int oob_size
;
234 unsigned int spare_size
;
235 unsigned int ecc_size
;
236 unsigned int ecc_err_cnt
;
237 unsigned int max_bitflips
;
240 /* cached register value */
245 /* generated NDCBx register values */
252 static bool use_dma
= 1;
253 module_param(use_dma
, bool, 0444);
254 MODULE_PARM_DESC(use_dma
, "enable DMA for data transferring to/from NAND HW");
256 struct pxa3xx_nand_timing
{
257 unsigned int tCH
; /* Enable signal hold time */
258 unsigned int tCS
; /* Enable signal setup time */
259 unsigned int tWH
; /* ND_nWE high duration */
260 unsigned int tWP
; /* ND_nWE pulse time */
261 unsigned int tRH
; /* ND_nRE high duration */
262 unsigned int tRP
; /* ND_nRE pulse width */
263 unsigned int tR
; /* ND_nWE high to ND_nRE low for read */
264 unsigned int tWHR
; /* ND_nWE high to ND_nRE low for status read */
265 unsigned int tAR
; /* ND_ALE low to ND_nRE low delay */
268 struct pxa3xx_nand_flash
{
270 unsigned int flash_width
; /* Width of Flash memory (DWIDTH_M) */
271 unsigned int dfc_width
; /* Width of flash controller(DWIDTH_C) */
272 struct pxa3xx_nand_timing
*timing
; /* NAND Flash timing */
275 static struct pxa3xx_nand_timing timing
[] = {
276 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
277 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
278 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
279 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
282 static struct pxa3xx_nand_flash builtin_flash_types
[] = {
283 { 0x46ec, 16, 16, &timing
[1] },
284 { 0xdaec, 8, 8, &timing
[1] },
285 { 0xd7ec, 8, 8, &timing
[1] },
286 { 0xa12c, 8, 8, &timing
[2] },
287 { 0xb12c, 16, 16, &timing
[2] },
288 { 0xdc2c, 8, 8, &timing
[2] },
289 { 0xcc2c, 16, 16, &timing
[2] },
290 { 0xba20, 16, 16, &timing
[3] },
293 static u8 bbt_pattern
[] = {'M', 'V', 'B', 'b', 't', '0' };
294 static u8 bbt_mirror_pattern
[] = {'1', 't', 'b', 'B', 'V', 'M' };
296 static struct nand_bbt_descr bbt_main_descr
= {
297 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
298 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
302 .maxblocks
= 8, /* Last 8 blocks in each chip */
303 .pattern
= bbt_pattern
306 static struct nand_bbt_descr bbt_mirror_descr
= {
307 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
308 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
312 .maxblocks
= 8, /* Last 8 blocks in each chip */
313 .pattern
= bbt_mirror_pattern
316 static struct nand_ecclayout ecc_layout_2KB_bch4bit
= {
319 32, 33, 34, 35, 36, 37, 38, 39,
320 40, 41, 42, 43, 44, 45, 46, 47,
321 48, 49, 50, 51, 52, 53, 54, 55,
322 56, 57, 58, 59, 60, 61, 62, 63},
323 .oobfree
= { {2, 30} }
326 static struct nand_ecclayout ecc_layout_4KB_bch4bit
= {
329 32, 33, 34, 35, 36, 37, 38, 39,
330 40, 41, 42, 43, 44, 45, 46, 47,
331 48, 49, 50, 51, 52, 53, 54, 55,
332 56, 57, 58, 59, 60, 61, 62, 63,
333 96, 97, 98, 99, 100, 101, 102, 103,
334 104, 105, 106, 107, 108, 109, 110, 111,
335 112, 113, 114, 115, 116, 117, 118, 119,
336 120, 121, 122, 123, 124, 125, 126, 127},
337 /* Bootrom looks in bytes 0 & 5 for bad blocks */
338 .oobfree
= { {6, 26}, { 64, 32} }
341 static struct nand_ecclayout ecc_layout_4KB_bch8bit
= {
344 32, 33, 34, 35, 36, 37, 38, 39,
345 40, 41, 42, 43, 44, 45, 46, 47,
346 48, 49, 50, 51, 52, 53, 54, 55,
347 56, 57, 58, 59, 60, 61, 62, 63},
351 #define NDTR0_tCH(c) (min((c), 7) << 19)
352 #define NDTR0_tCS(c) (min((c), 7) << 16)
353 #define NDTR0_tWH(c) (min((c), 7) << 11)
354 #define NDTR0_tWP(c) (min((c), 7) << 8)
355 #define NDTR0_tRH(c) (min((c), 7) << 3)
356 #define NDTR0_tRP(c) (min((c), 7) << 0)
358 #define NDTR1_tR(c) (min((c), 65535) << 16)
359 #define NDTR1_tWHR(c) (min((c), 15) << 4)
360 #define NDTR1_tAR(c) (min((c), 15) << 0)
362 /* convert nano-seconds to nand flash controller clock cycles */
363 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
365 static const struct of_device_id pxa3xx_nand_dt_ids
[] = {
367 .compatible
= "marvell,pxa3xx-nand",
368 .data
= (void *)PXA3XX_NAND_VARIANT_PXA
,
371 .compatible
= "marvell,armada370-nand",
372 .data
= (void *)PXA3XX_NAND_VARIANT_ARMADA370
,
376 MODULE_DEVICE_TABLE(of
, pxa3xx_nand_dt_ids
);
378 static enum pxa3xx_nand_variant
379 pxa3xx_nand_get_variant(struct platform_device
*pdev
)
381 const struct of_device_id
*of_id
=
382 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
384 return PXA3XX_NAND_VARIANT_PXA
;
385 return (enum pxa3xx_nand_variant
)of_id
->data
;
388 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host
*host
,
389 const struct pxa3xx_nand_timing
*t
)
391 struct pxa3xx_nand_info
*info
= host
->info_data
;
392 unsigned long nand_clk
= clk_get_rate(info
->clk
);
393 uint32_t ndtr0
, ndtr1
;
395 ndtr0
= NDTR0_tCH(ns2cycle(t
->tCH
, nand_clk
)) |
396 NDTR0_tCS(ns2cycle(t
->tCS
, nand_clk
)) |
397 NDTR0_tWH(ns2cycle(t
->tWH
, nand_clk
)) |
398 NDTR0_tWP(ns2cycle(t
->tWP
, nand_clk
)) |
399 NDTR0_tRH(ns2cycle(t
->tRH
, nand_clk
)) |
400 NDTR0_tRP(ns2cycle(t
->tRP
, nand_clk
));
402 ndtr1
= NDTR1_tR(ns2cycle(t
->tR
, nand_clk
)) |
403 NDTR1_tWHR(ns2cycle(t
->tWHR
, nand_clk
)) |
404 NDTR1_tAR(ns2cycle(t
->tAR
, nand_clk
));
406 info
->ndtr0cs0
= ndtr0
;
407 info
->ndtr1cs0
= ndtr1
;
408 nand_writel(info
, NDTR0CS0
, ndtr0
);
409 nand_writel(info
, NDTR1CS0
, ndtr1
);
412 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host
*host
,
413 const struct nand_sdr_timings
*t
)
415 struct pxa3xx_nand_info
*info
= host
->info_data
;
416 struct nand_chip
*chip
= &host
->chip
;
417 unsigned long nand_clk
= clk_get_rate(info
->clk
);
418 uint32_t ndtr0
, ndtr1
;
420 u32 tCH_min
= DIV_ROUND_UP(t
->tCH_min
, 1000);
421 u32 tCS_min
= DIV_ROUND_UP(t
->tCS_min
, 1000);
422 u32 tWH_min
= DIV_ROUND_UP(t
->tWH_min
, 1000);
423 u32 tWP_min
= DIV_ROUND_UP(t
->tWC_min
- t
->tWH_min
, 1000);
424 u32 tREH_min
= DIV_ROUND_UP(t
->tREH_min
, 1000);
425 u32 tRP_min
= DIV_ROUND_UP(t
->tRC_min
- t
->tREH_min
, 1000);
426 u32 tR
= chip
->chip_delay
* 1000;
427 u32 tWHR_min
= DIV_ROUND_UP(t
->tWHR_min
, 1000);
428 u32 tAR_min
= DIV_ROUND_UP(t
->tAR_min
, 1000);
430 /* fallback to a default value if tR = 0 */
434 ndtr0
= NDTR0_tCH(ns2cycle(tCH_min
, nand_clk
)) |
435 NDTR0_tCS(ns2cycle(tCS_min
, nand_clk
)) |
436 NDTR0_tWH(ns2cycle(tWH_min
, nand_clk
)) |
437 NDTR0_tWP(ns2cycle(tWP_min
, nand_clk
)) |
438 NDTR0_tRH(ns2cycle(tREH_min
, nand_clk
)) |
439 NDTR0_tRP(ns2cycle(tRP_min
, nand_clk
));
441 ndtr1
= NDTR1_tR(ns2cycle(tR
, nand_clk
)) |
442 NDTR1_tWHR(ns2cycle(tWHR_min
, nand_clk
)) |
443 NDTR1_tAR(ns2cycle(tAR_min
, nand_clk
));
445 info
->ndtr0cs0
= ndtr0
;
446 info
->ndtr1cs0
= ndtr1
;
447 nand_writel(info
, NDTR0CS0
, ndtr0
);
448 nand_writel(info
, NDTR1CS0
, ndtr1
);
451 static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host
*host
,
452 unsigned int *flash_width
,
453 unsigned int *dfc_width
)
455 struct nand_chip
*chip
= &host
->chip
;
456 struct pxa3xx_nand_info
*info
= host
->info_data
;
457 const struct pxa3xx_nand_flash
*f
= NULL
;
460 ntypes
= ARRAY_SIZE(builtin_flash_types
);
462 chip
->cmdfunc(host
->mtd
, NAND_CMD_READID
, 0x00, -1);
464 id
= chip
->read_byte(host
->mtd
);
465 id
|= chip
->read_byte(host
->mtd
) << 0x8;
467 for (i
= 0; i
< ntypes
; i
++) {
468 f
= &builtin_flash_types
[i
];
470 if (f
->chip_id
== id
)
475 dev_err(&info
->pdev
->dev
, "Error: timings not found\n");
479 pxa3xx_nand_set_timing(host
, f
->timing
);
481 *flash_width
= f
->flash_width
;
482 *dfc_width
= f
->dfc_width
;
487 static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host
*host
,
490 const struct nand_sdr_timings
*timings
;
492 mode
= fls(mode
) - 1;
496 timings
= onfi_async_timing_mode_to_sdr_timings(mode
);
498 return PTR_ERR(timings
);
500 pxa3xx_nand_set_sdr_timing(host
, timings
);
505 static int pxa3xx_nand_init(struct pxa3xx_nand_host
*host
)
507 struct nand_chip
*chip
= &host
->chip
;
508 struct pxa3xx_nand_info
*info
= host
->info_data
;
509 unsigned int flash_width
= 0, dfc_width
= 0;
512 mode
= onfi_get_async_timing_mode(chip
);
513 if (mode
== ONFI_TIMING_MODE_UNKNOWN
) {
514 err
= pxa3xx_nand_init_timings_compat(host
, &flash_width
,
519 if (flash_width
== 16) {
520 info
->reg_ndcr
|= NDCR_DWIDTH_M
;
521 chip
->options
|= NAND_BUSWIDTH_16
;
524 info
->reg_ndcr
|= (dfc_width
== 16) ? NDCR_DWIDTH_C
: 0;
526 err
= pxa3xx_nand_init_timings_onfi(host
, mode
);
535 * Set the data and OOB size, depending on the selected
536 * spare and ECC configuration.
537 * Only applicable to READ0, READOOB and PAGEPROG commands.
539 static void pxa3xx_set_datasize(struct pxa3xx_nand_info
*info
,
540 struct mtd_info
*mtd
)
542 int oob_enable
= info
->reg_ndcr
& NDCR_SPARE_EN
;
544 info
->data_size
= mtd
->writesize
;
548 info
->oob_size
= info
->spare_size
;
550 info
->oob_size
+= info
->ecc_size
;
554 * NOTE: it is a must to set ND_RUN firstly, then write
555 * command buffer, otherwise, it does not work.
556 * We enable all the interrupt at the same time, and
557 * let pxa3xx_nand_irq to handle all logic.
559 static void pxa3xx_nand_start(struct pxa3xx_nand_info
*info
)
563 ndcr
= info
->reg_ndcr
;
568 nand_writel(info
, NDECCCTRL
, 0x1);
570 ndcr
&= ~NDCR_ECC_EN
;
572 nand_writel(info
, NDECCCTRL
, 0x0);
578 ndcr
&= ~NDCR_DMA_EN
;
581 ndcr
|= NDCR_SPARE_EN
;
583 ndcr
&= ~NDCR_SPARE_EN
;
587 /* clear status bits and run */
588 nand_writel(info
, NDSR
, NDSR_MASK
);
589 nand_writel(info
, NDCR
, 0);
590 nand_writel(info
, NDCR
, ndcr
);
593 static void pxa3xx_nand_stop(struct pxa3xx_nand_info
*info
)
596 int timeout
= NAND_STOP_DELAY
;
598 /* wait RUN bit in NDCR become 0 */
599 ndcr
= nand_readl(info
, NDCR
);
600 while ((ndcr
& NDCR_ND_RUN
) && (timeout
-- > 0)) {
601 ndcr
= nand_readl(info
, NDCR
);
606 ndcr
&= ~NDCR_ND_RUN
;
607 nand_writel(info
, NDCR
, ndcr
);
610 dmaengine_terminate_all(info
->dma_chan
);
612 /* clear status bits */
613 nand_writel(info
, NDSR
, NDSR_MASK
);
616 static void __maybe_unused
617 enable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
621 ndcr
= nand_readl(info
, NDCR
);
622 nand_writel(info
, NDCR
, ndcr
& ~int_mask
);
625 static void disable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
629 ndcr
= nand_readl(info
, NDCR
);
630 nand_writel(info
, NDCR
, ndcr
| int_mask
);
633 static void drain_fifo(struct pxa3xx_nand_info
*info
, void *data
, int len
)
640 * According to the datasheet, when reading from NDDB
641 * with BCH enabled, after each 32 bytes reads, we
642 * have to make sure that the NDSR.RDDREQ bit is set.
644 * Drain the FIFO 8 32 bits reads at a time, and skip
645 * the polling on the last read.
648 ioread32_rep(info
->mmio_base
+ NDDB
, data
, 8);
650 ret
= readl_relaxed_poll_timeout(info
->mmio_base
+ NDSR
, val
,
651 val
& NDSR_RDDREQ
, 1000, 5000);
653 dev_err(&info
->pdev
->dev
,
654 "Timeout on RDDREQ while draining the FIFO\n");
663 ioread32_rep(info
->mmio_base
+ NDDB
, data
, len
);
666 static void handle_data_pio(struct pxa3xx_nand_info
*info
)
668 unsigned int do_bytes
= min(info
->data_size
, info
->chunk_size
);
670 switch (info
->state
) {
671 case STATE_PIO_WRITING
:
672 writesl(info
->mmio_base
+ NDDB
,
673 info
->data_buff
+ info
->data_buff_pos
,
674 DIV_ROUND_UP(do_bytes
, 4));
676 if (info
->oob_size
> 0)
677 writesl(info
->mmio_base
+ NDDB
,
678 info
->oob_buff
+ info
->oob_buff_pos
,
679 DIV_ROUND_UP(info
->oob_size
, 4));
681 case STATE_PIO_READING
:
683 info
->data_buff
+ info
->data_buff_pos
,
684 DIV_ROUND_UP(do_bytes
, 4));
686 if (info
->oob_size
> 0)
688 info
->oob_buff
+ info
->oob_buff_pos
,
689 DIV_ROUND_UP(info
->oob_size
, 4));
692 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
697 /* Update buffer pointers for multi-page read/write */
698 info
->data_buff_pos
+= do_bytes
;
699 info
->oob_buff_pos
+= info
->oob_size
;
700 info
->data_size
-= do_bytes
;
703 static void pxa3xx_nand_data_dma_irq(void *data
)
705 struct pxa3xx_nand_info
*info
= data
;
706 struct dma_tx_state state
;
707 enum dma_status status
;
709 status
= dmaengine_tx_status(info
->dma_chan
, info
->dma_cookie
, &state
);
710 if (likely(status
== DMA_COMPLETE
)) {
711 info
->state
= STATE_DMA_DONE
;
713 dev_err(&info
->pdev
->dev
, "DMA error on data channel\n");
714 info
->retcode
= ERR_DMABUSERR
;
716 dma_unmap_sg(info
->dma_chan
->device
->dev
, &info
->sg
, 1, info
->dma_dir
);
718 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
719 enable_int(info
, NDCR_INT_MASK
);
722 static void start_data_dma(struct pxa3xx_nand_info
*info
)
724 enum dma_transfer_direction direction
;
725 struct dma_async_tx_descriptor
*tx
;
727 switch (info
->state
) {
728 case STATE_DMA_WRITING
:
729 info
->dma_dir
= DMA_TO_DEVICE
;
730 direction
= DMA_MEM_TO_DEV
;
732 case STATE_DMA_READING
:
733 info
->dma_dir
= DMA_FROM_DEVICE
;
734 direction
= DMA_DEV_TO_MEM
;
737 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
741 info
->sg
.length
= info
->data_size
+
742 (info
->oob_size
? info
->spare_size
+ info
->ecc_size
: 0);
743 dma_map_sg(info
->dma_chan
->device
->dev
, &info
->sg
, 1, info
->dma_dir
);
745 tx
= dmaengine_prep_slave_sg(info
->dma_chan
, &info
->sg
, 1, direction
,
748 dev_err(&info
->pdev
->dev
, "prep_slave_sg() failed\n");
751 tx
->callback
= pxa3xx_nand_data_dma_irq
;
752 tx
->callback_param
= info
;
753 info
->dma_cookie
= dmaengine_submit(tx
);
754 dma_async_issue_pending(info
->dma_chan
);
755 dev_dbg(&info
->pdev
->dev
, "%s(dir=%d cookie=%x size=%u)\n",
756 __func__
, direction
, info
->dma_cookie
, info
->sg
.length
);
759 static irqreturn_t
pxa3xx_nand_irq_thread(int irq
, void *data
)
761 struct pxa3xx_nand_info
*info
= data
;
763 handle_data_pio(info
);
765 info
->state
= STATE_CMD_DONE
;
766 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
771 static irqreturn_t
pxa3xx_nand_irq(int irq
, void *devid
)
773 struct pxa3xx_nand_info
*info
= devid
;
774 unsigned int status
, is_completed
= 0, is_ready
= 0;
775 unsigned int ready
, cmd_done
;
776 irqreturn_t ret
= IRQ_HANDLED
;
779 ready
= NDSR_FLASH_RDY
;
780 cmd_done
= NDSR_CS0_CMDD
;
783 cmd_done
= NDSR_CS1_CMDD
;
786 status
= nand_readl(info
, NDSR
);
788 if (status
& NDSR_UNCORERR
)
789 info
->retcode
= ERR_UNCORERR
;
790 if (status
& NDSR_CORERR
) {
791 info
->retcode
= ERR_CORERR
;
792 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
&&
794 info
->ecc_err_cnt
= NDSR_ERR_CNT(status
);
796 info
->ecc_err_cnt
= 1;
799 * Each chunk composing a page is corrected independently,
800 * and we need to store maximum number of corrected bitflips
801 * to return it to the MTD layer in ecc.read_page().
803 info
->max_bitflips
= max_t(unsigned int,
807 if (status
& (NDSR_RDDREQ
| NDSR_WRDREQ
)) {
808 /* whether use dma to transfer data */
810 disable_int(info
, NDCR_INT_MASK
);
811 info
->state
= (status
& NDSR_RDDREQ
) ?
812 STATE_DMA_READING
: STATE_DMA_WRITING
;
813 start_data_dma(info
);
814 goto NORMAL_IRQ_EXIT
;
816 info
->state
= (status
& NDSR_RDDREQ
) ?
817 STATE_PIO_READING
: STATE_PIO_WRITING
;
818 ret
= IRQ_WAKE_THREAD
;
819 goto NORMAL_IRQ_EXIT
;
822 if (status
& cmd_done
) {
823 info
->state
= STATE_CMD_DONE
;
826 if (status
& ready
) {
827 info
->state
= STATE_READY
;
832 * Clear all status bit before issuing the next command, which
833 * can and will alter the status bits and will deserve a new
834 * interrupt on its own. This lets the controller exit the IRQ
836 nand_writel(info
, NDSR
, status
);
838 if (status
& NDSR_WRCMDREQ
) {
839 status
&= ~NDSR_WRCMDREQ
;
840 info
->state
= STATE_CMD_HANDLE
;
843 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
844 * must be loaded by writing directly either 12 or 16
845 * bytes directly to NDCB0, four bytes at a time.
847 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
848 * but each NDCBx register can be read.
850 nand_writel(info
, NDCB0
, info
->ndcb0
);
851 nand_writel(info
, NDCB0
, info
->ndcb1
);
852 nand_writel(info
, NDCB0
, info
->ndcb2
);
854 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
855 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
856 nand_writel(info
, NDCB0
, info
->ndcb3
);
860 complete(&info
->cmd_complete
);
862 complete(&info
->dev_ready
);
/*
 * Return 1 if the whole buffer is erased-flash content (all bytes 0xff),
 * 0 otherwise.  An empty buffer (len == 0) counts as blank.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
875 static void set_command_address(struct pxa3xx_nand_info
*info
,
876 unsigned int page_size
, uint16_t column
, int page_addr
)
878 /* small page addr setting */
879 if (page_size
< PAGE_CHUNK_SIZE
) {
880 info
->ndcb1
= ((page_addr
& 0xFFFFFF) << 8)
885 info
->ndcb1
= ((page_addr
& 0xFFFF) << 16)
888 if (page_addr
& 0xFF0000)
889 info
->ndcb2
= (page_addr
& 0xFF0000) >> 16;
895 static void prepare_start_command(struct pxa3xx_nand_info
*info
, int command
)
897 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
898 struct mtd_info
*mtd
= host
->mtd
;
900 /* reset data and oob column point to handle data */
904 info
->data_buff_pos
= 0;
905 info
->oob_buff_pos
= 0;
908 info
->retcode
= ERR_NONE
;
909 info
->ecc_err_cnt
= 0;
915 case NAND_CMD_PAGEPROG
:
917 case NAND_CMD_READOOB
:
918 pxa3xx_set_datasize(info
, mtd
);
930 * If we are about to issue a read command, or about to set
931 * the write address, then clean the data buffer.
933 if (command
== NAND_CMD_READ0
||
934 command
== NAND_CMD_READOOB
||
935 command
== NAND_CMD_SEQIN
) {
937 info
->buf_count
= mtd
->writesize
+ mtd
->oobsize
;
938 memset(info
->data_buff
, 0xFF, info
->buf_count
);
943 static int prepare_set_command(struct pxa3xx_nand_info
*info
, int command
,
944 int ext_cmd_type
, uint16_t column
, int page_addr
)
946 int addr_cycle
, exec_cmd
;
947 struct pxa3xx_nand_host
*host
;
948 struct mtd_info
*mtd
;
950 host
= info
->host
[info
->cs
];
956 info
->ndcb0
= NDCB0_CSEL
;
960 if (command
== NAND_CMD_SEQIN
)
963 addr_cycle
= NDCB0_ADDR_CYC(host
->row_addr_cycles
964 + host
->col_addr_cycles
);
967 case NAND_CMD_READOOB
:
969 info
->buf_start
= column
;
970 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
974 if (command
== NAND_CMD_READOOB
)
975 info
->buf_start
+= mtd
->writesize
;
978 * Multiple page read needs an 'extended command type' field,
979 * which is either naked-read or last-read according to the
982 if (mtd
->writesize
== PAGE_CHUNK_SIZE
) {
983 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8);
984 } else if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
985 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8)
987 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
988 info
->ndcb3
= info
->chunk_size
+
992 set_command_address(info
, mtd
->writesize
, column
, page_addr
);
997 info
->buf_start
= column
;
998 set_command_address(info
, mtd
->writesize
, 0, page_addr
);
1001 * Multiple page programming needs to execute the initial
1002 * SEQIN command that sets the page address.
1004 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1005 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
1006 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
1009 /* No data transfer in this case */
1010 info
->data_size
= 0;
1015 case NAND_CMD_PAGEPROG
:
1016 if (is_buf_blank(info
->data_buff
,
1017 (mtd
->writesize
+ mtd
->oobsize
))) {
1022 /* Second command setting for large pages */
1023 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1025 * Multiple page write uses the 'extended command'
1026 * field. This can be used to issue a command dispatch
1027 * or a naked-write depending on the current stage.
1029 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
1031 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
1032 info
->ndcb3
= info
->chunk_size
+
1036 * This is the command dispatch that completes a chunked
1037 * page program operation.
1039 if (info
->data_size
== 0) {
1040 info
->ndcb0
= NDCB0_CMD_TYPE(0x1)
1041 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
1048 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
1052 | (NAND_CMD_PAGEPROG
<< 8)
1058 case NAND_CMD_PARAM
:
1059 info
->buf_count
= INIT_BUFFER_SIZE
;
1060 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
1064 info
->ndcb1
= (column
& 0xFF);
1065 info
->ndcb3
= INIT_BUFFER_SIZE
;
1066 info
->data_size
= INIT_BUFFER_SIZE
;
1069 case NAND_CMD_READID
:
1070 info
->buf_count
= READ_ID_BYTES
;
1071 info
->ndcb0
|= NDCB0_CMD_TYPE(3)
1074 info
->ndcb1
= (column
& 0xFF);
1076 info
->data_size
= 8;
1078 case NAND_CMD_STATUS
:
1079 info
->buf_count
= 1;
1080 info
->ndcb0
|= NDCB0_CMD_TYPE(4)
1084 info
->data_size
= 8;
1087 case NAND_CMD_ERASE1
:
1088 info
->ndcb0
|= NDCB0_CMD_TYPE(2)
1092 | (NAND_CMD_ERASE2
<< 8)
1094 info
->ndcb1
= page_addr
;
1098 case NAND_CMD_RESET
:
1099 info
->ndcb0
|= NDCB0_CMD_TYPE(5)
1104 case NAND_CMD_ERASE2
:
1110 dev_err(&info
->pdev
->dev
, "non-supported command %x\n",
1118 static void nand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
1119 int column
, int page_addr
)
1121 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1122 struct pxa3xx_nand_info
*info
= host
->info_data
;
1126 * if this is a x16 device ,then convert the input
1127 * "byte" address into a "word" address appropriate
1128 * for indexing a word-oriented device
1130 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1134 * There may be different NAND chip hooked to
1135 * different chip select, so check whether
1136 * chip select has been changed, if yes, reset the timing
1138 if (info
->cs
!= host
->cs
) {
1139 info
->cs
= host
->cs
;
1140 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
1141 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
1144 prepare_start_command(info
, command
);
1146 info
->state
= STATE_PREPARED
;
1147 exec_cmd
= prepare_set_command(info
, command
, 0, column
, page_addr
);
1150 init_completion(&info
->cmd_complete
);
1151 init_completion(&info
->dev_ready
);
1152 info
->need_wait
= 1;
1153 pxa3xx_nand_start(info
);
1155 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1156 CHIP_DELAY_TIMEOUT
)) {
1157 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1158 /* Stop State Machine for next command cycle */
1159 pxa3xx_nand_stop(info
);
1162 info
->state
= STATE_IDLE
;
1165 static void nand_cmdfunc_extended(struct mtd_info
*mtd
,
1166 const unsigned command
,
1167 int column
, int page_addr
)
1169 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1170 struct pxa3xx_nand_info
*info
= host
->info_data
;
1171 int exec_cmd
, ext_cmd_type
;
1174 * if this is a x16 device then convert the input
1175 * "byte" address into a "word" address appropriate
1176 * for indexing a word-oriented device
1178 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1182 * There may be different NAND chip hooked to
1183 * different chip select, so check whether
1184 * chip select has been changed, if yes, reset the timing
1186 if (info
->cs
!= host
->cs
) {
1187 info
->cs
= host
->cs
;
1188 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
1189 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
1192 /* Select the extended command for the first command */
1194 case NAND_CMD_READ0
:
1195 case NAND_CMD_READOOB
:
1196 ext_cmd_type
= EXT_CMD_TYPE_MONO
;
1198 case NAND_CMD_SEQIN
:
1199 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1201 case NAND_CMD_PAGEPROG
:
1202 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1209 prepare_start_command(info
, command
);
1212 * Prepare the "is ready" completion before starting a command
1213 * transaction sequence. If the command is not executed the
1214 * completion will be completed, see below.
1216 * We can do that inside the loop because the command variable
1217 * is invariant and thus so is the exec_cmd.
1219 info
->need_wait
= 1;
1220 init_completion(&info
->dev_ready
);
1222 info
->state
= STATE_PREPARED
;
1223 exec_cmd
= prepare_set_command(info
, command
, ext_cmd_type
,
1226 info
->need_wait
= 0;
1227 complete(&info
->dev_ready
);
1231 init_completion(&info
->cmd_complete
);
1232 pxa3xx_nand_start(info
);
1234 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1235 CHIP_DELAY_TIMEOUT
)) {
1236 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1237 /* Stop State Machine for next command cycle */
1238 pxa3xx_nand_stop(info
);
1242 /* Check if the sequence is complete */
1243 if (info
->data_size
== 0 && command
!= NAND_CMD_PAGEPROG
)
1247 * After a splitted program command sequence has issued
1248 * the command dispatch, the command sequence is complete.
1250 if (info
->data_size
== 0 &&
1251 command
== NAND_CMD_PAGEPROG
&&
1252 ext_cmd_type
== EXT_CMD_TYPE_DISPATCH
)
1255 if (command
== NAND_CMD_READ0
|| command
== NAND_CMD_READOOB
) {
1256 /* Last read: issue a 'last naked read' */
1257 if (info
->data_size
== info
->chunk_size
)
1258 ext_cmd_type
= EXT_CMD_TYPE_LAST_RW
;
1260 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1263 * If a splitted program command has no more data to transfer,
1264 * the command dispatch must be issued to complete.
1266 } else if (command
== NAND_CMD_PAGEPROG
&&
1267 info
->data_size
== 0) {
1268 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1272 info
->state
= STATE_IDLE
;
1275 static int pxa3xx_nand_write_page_hwecc(struct mtd_info
*mtd
,
1276 struct nand_chip
*chip
, const uint8_t *buf
, int oob_required
,
1279 chip
->write_buf(mtd
, buf
, mtd
->writesize
);
1280 chip
->write_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1285 static int pxa3xx_nand_read_page_hwecc(struct mtd_info
*mtd
,
1286 struct nand_chip
*chip
, uint8_t *buf
, int oob_required
,
1289 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1290 struct pxa3xx_nand_info
*info
= host
->info_data
;
1292 chip
->read_buf(mtd
, buf
, mtd
->writesize
);
1293 chip
->read_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1295 if (info
->retcode
== ERR_CORERR
&& info
->use_ecc
) {
1296 mtd
->ecc_stats
.corrected
+= info
->ecc_err_cnt
;
1298 } else if (info
->retcode
== ERR_UNCORERR
) {
1300 * for blank page (all 0xff), HW will calculate its ECC as
1301 * 0, which is different from the ECC information within
1302 * OOB, ignore such uncorrectable errors
1304 if (is_buf_blank(buf
, mtd
->writesize
))
1305 info
->retcode
= ERR_NONE
;
1307 mtd
->ecc_stats
.failed
++;
1310 return info
->max_bitflips
;
1313 static uint8_t pxa3xx_nand_read_byte(struct mtd_info
*mtd
)
1315 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1316 struct pxa3xx_nand_info
*info
= host
->info_data
;
1319 if (info
->buf_start
< info
->buf_count
)
1320 /* Has just send a new command? */
1321 retval
= info
->data_buff
[info
->buf_start
++];
1326 static u16
pxa3xx_nand_read_word(struct mtd_info
*mtd
)
1328 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1329 struct pxa3xx_nand_info
*info
= host
->info_data
;
1330 u16 retval
= 0xFFFF;
1332 if (!(info
->buf_start
& 0x01) && info
->buf_start
< info
->buf_count
) {
1333 retval
= *((u16
*)(info
->data_buff
+info
->buf_start
));
1334 info
->buf_start
+= 2;
1339 static void pxa3xx_nand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1341 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1342 struct pxa3xx_nand_info
*info
= host
->info_data
;
1343 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1345 memcpy(buf
, info
->data_buff
+ info
->buf_start
, real_len
);
1346 info
->buf_start
+= real_len
;
1349 static void pxa3xx_nand_write_buf(struct mtd_info
*mtd
,
1350 const uint8_t *buf
, int len
)
1352 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1353 struct pxa3xx_nand_info
*info
= host
->info_data
;
1354 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1356 memcpy(info
->data_buff
+ info
->buf_start
, buf
, real_len
);
1357 info
->buf_start
+= real_len
;
/* Intentionally empty: chip selection is handled in the command path. */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1365 static int pxa3xx_nand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1367 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1368 struct pxa3xx_nand_info
*info
= host
->info_data
;
1370 if (info
->need_wait
) {
1371 info
->need_wait
= 0;
1372 if (!wait_for_completion_timeout(&info
->dev_ready
,
1373 CHIP_DELAY_TIMEOUT
)) {
1374 dev_err(&info
->pdev
->dev
, "Ready time out!!!\n");
1375 return NAND_STATUS_FAIL
;
1379 /* pxa3xx_nand_send_command has waited for command complete */
1380 if (this->state
== FL_WRITING
|| this->state
== FL_ERASING
) {
1381 if (info
->retcode
== ERR_NONE
)
1384 return NAND_STATUS_FAIL
;
1387 return NAND_STATUS_READY
;
1390 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info
*info
)
1392 struct platform_device
*pdev
= info
->pdev
;
1393 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1394 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
1395 struct mtd_info
*mtd
= host
->mtd
;
1396 struct nand_chip
*chip
= mtd
->priv
;
1398 /* configure default flash values */
1399 info
->reg_ndcr
= 0x0; /* enable all interrupts */
1400 info
->reg_ndcr
|= (pdata
->enable_arbiter
) ? NDCR_ND_ARB_EN
: 0;
1401 info
->reg_ndcr
|= NDCR_RD_ID_CNT(READ_ID_BYTES
);
1402 info
->reg_ndcr
|= NDCR_SPARE_EN
; /* enable spare by default */
1403 info
->reg_ndcr
|= (host
->col_addr_cycles
== 2) ? NDCR_RA_START
: 0;
1404 info
->reg_ndcr
|= (chip
->page_shift
== 6) ? NDCR_PG_PER_BLK
: 0;
1405 info
->reg_ndcr
|= (mtd
->writesize
== 2048) ? NDCR_PAGE_SZ
: 0;
1410 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info
*info
)
1412 uint32_t ndcr
= nand_readl(info
, NDCR
);
1414 /* Set an initial chunk size */
1415 info
->chunk_size
= ndcr
& NDCR_PAGE_SZ
? 2048 : 512;
1416 info
->reg_ndcr
= ndcr
&
1417 ~(NDCR_INT_MASK
| NDCR_ND_ARB_EN
| NFCV1_NDCR_ARB_CNTL
);
1418 info
->ndtr0cs0
= nand_readl(info
, NDTR0CS0
);
1419 info
->ndtr1cs0
= nand_readl(info
, NDTR1CS0
);
1423 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info
*info
)
1425 struct platform_device
*pdev
= info
->pdev
;
1426 struct dma_slave_config config
;
1427 dma_cap_mask_t mask
;
1428 struct pxad_param param
;
1431 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1432 if (info
->data_buff
== NULL
)
1437 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
1441 sg_init_one(&info
->sg
, info
->data_buff
, info
->buf_size
);
1443 dma_cap_set(DMA_SLAVE
, mask
);
1444 param
.prio
= PXAD_PRIO_LOWEST
;
1445 param
.drcmr
= info
->drcmr_dat
;
1446 info
->dma_chan
= dma_request_slave_channel_compat(mask
, pxad_filter_fn
,
1449 if (!info
->dma_chan
) {
1450 dev_err(&pdev
->dev
, "unable to request data dma channel\n");
1454 memset(&config
, 0, sizeof(config
));
1455 config
.src_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
1456 config
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
1457 config
.src_addr
= info
->mmio_phys
+ NDDB
;
1458 config
.dst_addr
= info
->mmio_phys
+ NDDB
;
1459 config
.src_maxburst
= 32;
1460 config
.dst_maxburst
= 32;
1461 ret
= dmaengine_slave_config(info
->dma_chan
, &config
);
1463 dev_err(&info
->pdev
->dev
,
1464 "dma channel configuration failed: %d\n",
1470 * Now that DMA buffers are allocated we turn on
1471 * DMA proper for I/O operations.
1477 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info
*info
)
1479 if (info
->use_dma
) {
1480 dmaengine_terminate_all(info
->dma_chan
);
1481 dma_release_channel(info
->dma_chan
);
1483 kfree(info
->data_buff
);
1486 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host
*host
)
1488 struct pxa3xx_nand_info
*info
= host
->info_data
;
1489 struct mtd_info
*mtd
;
1490 struct nand_chip
*chip
;
1491 const struct nand_sdr_timings
*timings
;
1494 mtd
= info
->host
[info
->cs
]->mtd
;
1497 /* use the common timing to make a try */
1498 timings
= onfi_async_timing_mode_to_sdr_timings(0);
1499 if (IS_ERR(timings
))
1500 return PTR_ERR(timings
);
1502 pxa3xx_nand_set_sdr_timing(host
, timings
);
1504 chip
->cmdfunc(mtd
, NAND_CMD_RESET
, 0, 0);
1505 ret
= chip
->waitfunc(mtd
, chip
);
1506 if (ret
& NAND_STATUS_FAIL
)
1512 static int pxa_ecc_init(struct pxa3xx_nand_info
*info
,
1513 struct nand_ecc_ctrl
*ecc
,
1514 int strength
, int ecc_stepsize
, int page_size
)
1516 if (strength
== 1 && ecc_stepsize
== 512 && page_size
== 2048) {
1517 info
->chunk_size
= 2048;
1518 info
->spare_size
= 40;
1519 info
->ecc_size
= 24;
1520 ecc
->mode
= NAND_ECC_HW
;
1524 } else if (strength
== 1 && ecc_stepsize
== 512 && page_size
== 512) {
1525 info
->chunk_size
= 512;
1526 info
->spare_size
= 8;
1528 ecc
->mode
= NAND_ECC_HW
;
1533 * Required ECC: 4-bit correction per 512 bytes
1534 * Select: 16-bit correction per 2048 bytes
1536 } else if (strength
== 4 && ecc_stepsize
== 512 && page_size
== 2048) {
1538 info
->chunk_size
= 2048;
1539 info
->spare_size
= 32;
1540 info
->ecc_size
= 32;
1541 ecc
->mode
= NAND_ECC_HW
;
1542 ecc
->size
= info
->chunk_size
;
1543 ecc
->layout
= &ecc_layout_2KB_bch4bit
;
1546 } else if (strength
== 4 && ecc_stepsize
== 512 && page_size
== 4096) {
1548 info
->chunk_size
= 2048;
1549 info
->spare_size
= 32;
1550 info
->ecc_size
= 32;
1551 ecc
->mode
= NAND_ECC_HW
;
1552 ecc
->size
= info
->chunk_size
;
1553 ecc
->layout
= &ecc_layout_4KB_bch4bit
;
1557 * Required ECC: 8-bit correction per 512 bytes
1558 * Select: 16-bit correction per 1024 bytes
1560 } else if (strength
== 8 && ecc_stepsize
== 512 && page_size
== 4096) {
1562 info
->chunk_size
= 1024;
1563 info
->spare_size
= 0;
1564 info
->ecc_size
= 32;
1565 ecc
->mode
= NAND_ECC_HW
;
1566 ecc
->size
= info
->chunk_size
;
1567 ecc
->layout
= &ecc_layout_4KB_bch8bit
;
1570 dev_err(&info
->pdev
->dev
,
1571 "ECC strength %d at page size %d is not supported\n",
1572 strength
, page_size
);
1576 dev_info(&info
->pdev
->dev
, "ECC strength %d, ECC step size %d\n",
1577 ecc
->strength
, ecc
->size
);
1581 static int pxa3xx_nand_scan(struct mtd_info
*mtd
)
1583 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1584 struct pxa3xx_nand_info
*info
= host
->info_data
;
1585 struct platform_device
*pdev
= info
->pdev
;
1586 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1587 struct nand_chip
*chip
= mtd
->priv
;
1589 uint16_t ecc_strength
, ecc_step
;
1591 if (pdata
->keep_config
&& !pxa3xx_nand_detect_config(info
))
1594 /* Set a default chunk size */
1595 info
->chunk_size
= 512;
1597 ret
= pxa3xx_nand_config_flash(info
);
1601 ret
= pxa3xx_nand_sensing(host
);
1603 dev_info(&info
->pdev
->dev
, "There is no chip on cs %d!\n",
1610 info
->reg_ndcr
|= (pdata
->enable_arbiter
) ? NDCR_ND_ARB_EN
: 0;
1611 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1612 chip
->options
|= NAND_BUSWIDTH_16
;
1614 /* Device detection must be done with ECC disabled */
1615 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
1616 nand_writel(info
, NDECCCTRL
, 0x0);
1618 if (nand_scan_ident(mtd
, 1, NULL
))
1621 if (!pdata
->keep_config
) {
1622 ret
= pxa3xx_nand_init(host
);
1624 dev_err(&info
->pdev
->dev
, "Failed to init nand: %d\n",
1630 if (pdata
->flash_bbt
) {
1632 * We'll use a bad block table stored in-flash and don't
1633 * allow writing the bad block marker to the flash.
1635 chip
->bbt_options
|= NAND_BBT_USE_FLASH
|
1636 NAND_BBT_NO_OOB_BBM
;
1637 chip
->bbt_td
= &bbt_main_descr
;
1638 chip
->bbt_md
= &bbt_mirror_descr
;
1642 * If the page size is bigger than the FIFO size, let's check
1643 * we are given the right variant and then switch to the extended
1644 * (aka splitted) command handling,
1646 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1647 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
) {
1648 chip
->cmdfunc
= nand_cmdfunc_extended
;
1650 dev_err(&info
->pdev
->dev
,
1651 "unsupported page size on this variant\n");
1656 if (pdata
->ecc_strength
&& pdata
->ecc_step_size
) {
1657 ecc_strength
= pdata
->ecc_strength
;
1658 ecc_step
= pdata
->ecc_step_size
;
1660 ecc_strength
= chip
->ecc_strength_ds
;
1661 ecc_step
= chip
->ecc_step_ds
;
1664 /* Set default ECC strength requirements on non-ONFI devices */
1665 if (ecc_strength
< 1 && ecc_step
< 1) {
1670 ret
= pxa_ecc_init(info
, &chip
->ecc
, ecc_strength
,
1671 ecc_step
, mtd
->writesize
);
1675 /* calculate addressing information */
1676 if (mtd
->writesize
>= 2048)
1677 host
->col_addr_cycles
= 2;
1679 host
->col_addr_cycles
= 1;
1681 /* release the initial buffer */
1682 kfree(info
->data_buff
);
1684 /* allocate the real data + oob buffer */
1685 info
->buf_size
= mtd
->writesize
+ mtd
->oobsize
;
1686 ret
= pxa3xx_nand_init_buff(info
);
1689 info
->oob_buff
= info
->data_buff
+ mtd
->writesize
;
1691 if ((mtd
->size
>> chip
->page_shift
) > 65536)
1692 host
->row_addr_cycles
= 3;
1694 host
->row_addr_cycles
= 2;
1695 return nand_scan_tail(mtd
);
1698 static int alloc_nand_resource(struct platform_device
*pdev
)
1700 struct pxa3xx_nand_platform_data
*pdata
;
1701 struct pxa3xx_nand_info
*info
;
1702 struct pxa3xx_nand_host
*host
;
1703 struct nand_chip
*chip
= NULL
;
1704 struct mtd_info
*mtd
;
1708 pdata
= dev_get_platdata(&pdev
->dev
);
1709 if (pdata
->num_cs
<= 0)
1711 info
= devm_kzalloc(&pdev
->dev
, sizeof(*info
) + (sizeof(*mtd
) +
1712 sizeof(*host
)) * pdata
->num_cs
, GFP_KERNEL
);
1717 info
->variant
= pxa3xx_nand_get_variant(pdev
);
1718 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1719 mtd
= (void *)&info
[1] + (sizeof(*mtd
) + sizeof(*host
)) * cs
;
1720 chip
= (struct nand_chip
*)(&mtd
[1]);
1721 host
= (struct pxa3xx_nand_host
*)chip
;
1722 info
->host
[cs
] = host
;
1725 host
->info_data
= info
;
1727 mtd
->dev
.parent
= &pdev
->dev
;
1729 chip
->ecc
.read_page
= pxa3xx_nand_read_page_hwecc
;
1730 chip
->ecc
.write_page
= pxa3xx_nand_write_page_hwecc
;
1731 chip
->controller
= &info
->controller
;
1732 chip
->waitfunc
= pxa3xx_nand_waitfunc
;
1733 chip
->select_chip
= pxa3xx_nand_select_chip
;
1734 chip
->read_word
= pxa3xx_nand_read_word
;
1735 chip
->read_byte
= pxa3xx_nand_read_byte
;
1736 chip
->read_buf
= pxa3xx_nand_read_buf
;
1737 chip
->write_buf
= pxa3xx_nand_write_buf
;
1738 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
1739 chip
->cmdfunc
= nand_cmdfunc
;
1742 spin_lock_init(&chip
->controller
->lock
);
1743 init_waitqueue_head(&chip
->controller
->wq
);
1744 info
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
1745 if (IS_ERR(info
->clk
)) {
1746 dev_err(&pdev
->dev
, "failed to get nand clock\n");
1747 return PTR_ERR(info
->clk
);
1749 ret
= clk_prepare_enable(info
->clk
);
1754 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 0);
1757 "no resource defined for data DMA\n");
1759 goto fail_disable_clk
;
1761 info
->drcmr_dat
= r
->start
;
1763 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 1);
1766 "no resource defined for cmd DMA\n");
1768 goto fail_disable_clk
;
1770 info
->drcmr_cmd
= r
->start
;
1773 irq
= platform_get_irq(pdev
, 0);
1775 dev_err(&pdev
->dev
, "no IRQ resource defined\n");
1777 goto fail_disable_clk
;
1780 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1781 info
->mmio_base
= devm_ioremap_resource(&pdev
->dev
, r
);
1782 if (IS_ERR(info
->mmio_base
)) {
1783 ret
= PTR_ERR(info
->mmio_base
);
1784 goto fail_disable_clk
;
1786 info
->mmio_phys
= r
->start
;
1788 /* Allocate a buffer to allow flash detection */
1789 info
->buf_size
= INIT_BUFFER_SIZE
;
1790 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1791 if (info
->data_buff
== NULL
) {
1793 goto fail_disable_clk
;
1796 /* initialize all interrupts to be disabled */
1797 disable_int(info
, NDSR_MASK
);
1799 ret
= request_threaded_irq(irq
, pxa3xx_nand_irq
,
1800 pxa3xx_nand_irq_thread
, IRQF_ONESHOT
,
1803 dev_err(&pdev
->dev
, "failed to request IRQ\n");
1807 platform_set_drvdata(pdev
, info
);
1812 free_irq(irq
, info
);
1813 kfree(info
->data_buff
);
1815 clk_disable_unprepare(info
->clk
);
1819 static int pxa3xx_nand_remove(struct platform_device
*pdev
)
1821 struct pxa3xx_nand_info
*info
= platform_get_drvdata(pdev
);
1822 struct pxa3xx_nand_platform_data
*pdata
;
1828 pdata
= dev_get_platdata(&pdev
->dev
);
1830 irq
= platform_get_irq(pdev
, 0);
1832 free_irq(irq
, info
);
1833 pxa3xx_nand_free_buff(info
);
1836 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1837 * In order to prevent a lockup of the system bus, the DFI bus
1838 * arbitration is granted to SMC upon driver removal. This is done by
1839 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
1840 * access to the bus anymore.
1842 nand_writel(info
, NDCR
,
1843 (nand_readl(info
, NDCR
) & ~NDCR_ND_ARB_EN
) |
1844 NFCV1_NDCR_ARB_CNTL
);
1845 clk_disable_unprepare(info
->clk
);
1847 for (cs
= 0; cs
< pdata
->num_cs
; cs
++)
1848 nand_release(info
->host
[cs
]->mtd
);
1852 static int pxa3xx_nand_probe_dt(struct platform_device
*pdev
)
1854 struct pxa3xx_nand_platform_data
*pdata
;
1855 struct device_node
*np
= pdev
->dev
.of_node
;
1856 const struct of_device_id
*of_id
=
1857 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
1862 pdata
= devm_kzalloc(&pdev
->dev
, sizeof(*pdata
), GFP_KERNEL
);
1866 if (of_get_property(np
, "marvell,nand-enable-arbiter", NULL
))
1867 pdata
->enable_arbiter
= 1;
1868 if (of_get_property(np
, "marvell,nand-keep-config", NULL
))
1869 pdata
->keep_config
= 1;
1870 of_property_read_u32(np
, "num-cs", &pdata
->num_cs
);
1871 pdata
->flash_bbt
= of_get_nand_on_flash_bbt(np
);
1873 pdata
->ecc_strength
= of_get_nand_ecc_strength(np
);
1874 if (pdata
->ecc_strength
< 0)
1875 pdata
->ecc_strength
= 0;
1877 pdata
->ecc_step_size
= of_get_nand_ecc_step_size(np
);
1878 if (pdata
->ecc_step_size
< 0)
1879 pdata
->ecc_step_size
= 0;
1881 pdev
->dev
.platform_data
= pdata
;
1886 static int pxa3xx_nand_probe(struct platform_device
*pdev
)
1888 struct pxa3xx_nand_platform_data
*pdata
;
1889 struct mtd_part_parser_data ppdata
= {};
1890 struct pxa3xx_nand_info
*info
;
1891 int ret
, cs
, probe_success
, dma_available
;
1893 dma_available
= IS_ENABLED(CONFIG_ARM
) &&
1894 (IS_ENABLED(CONFIG_ARCH_PXA
) || IS_ENABLED(CONFIG_ARCH_MMP
));
1895 if (use_dma
&& !dma_available
) {
1897 dev_warn(&pdev
->dev
,
1898 "This platform can't do DMA on this device\n");
1901 ret
= pxa3xx_nand_probe_dt(pdev
);
1905 pdata
= dev_get_platdata(&pdev
->dev
);
1907 dev_err(&pdev
->dev
, "no platform data defined\n");
1911 ret
= alloc_nand_resource(pdev
);
1913 dev_err(&pdev
->dev
, "alloc nand resource failed\n");
1917 info
= platform_get_drvdata(pdev
);
1919 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1920 struct mtd_info
*mtd
= info
->host
[cs
]->mtd
;
1923 * The mtd name matches the one used in 'mtdparts' kernel
1924 * parameter. This name cannot be changed or otherwise
1925 * user's mtd partitions configuration would get broken.
1927 mtd
->name
= "pxa3xx_nand-0";
1929 ret
= pxa3xx_nand_scan(mtd
);
1931 dev_warn(&pdev
->dev
, "failed to scan nand at cs %d\n",
1936 ppdata
.of_node
= pdev
->dev
.of_node
;
1937 ret
= mtd_device_parse_register(mtd
, NULL
,
1938 &ppdata
, pdata
->parts
[cs
],
1939 pdata
->nr_parts
[cs
]);
1944 if (!probe_success
) {
1945 pxa3xx_nand_remove(pdev
);
1953 static int pxa3xx_nand_suspend(struct device
*dev
)
1955 struct pxa3xx_nand_info
*info
= dev_get_drvdata(dev
);
1958 dev_err(dev
, "driver busy, state = %d\n", info
->state
);
1965 static int pxa3xx_nand_resume(struct device
*dev
)
1967 struct pxa3xx_nand_info
*info
= dev_get_drvdata(dev
);
1969 /* We don't want to handle interrupt without calling mtd routine */
1970 disable_int(info
, NDCR_INT_MASK
);
1973 * Directly set the chip select to a invalid value,
1974 * then the driver would reset the timing according
1975 * to current chip select at the beginning of cmdfunc
1980 * As the spec says, the NDSR would be updated to 0x1800 when
1981 * doing the nand_clk disable/enable.
1982 * To prevent it damaging state machine of the driver, clear
1983 * all status before resume
1985 nand_writel(info
, NDSR
, NDSR_MASK
);
1990 #define pxa3xx_nand_suspend NULL
1991 #define pxa3xx_nand_resume NULL
1994 static const struct dev_pm_ops pxa3xx_nand_pm_ops
= {
1995 .suspend
= pxa3xx_nand_suspend
,
1996 .resume
= pxa3xx_nand_resume
,
1999 static struct platform_driver pxa3xx_nand_driver
= {
2001 .name
= "pxa3xx-nand",
2002 .of_match_table
= pxa3xx_nand_dt_ids
,
2003 .pm
= &pxa3xx_nand_pm_ops
,
2005 .probe
= pxa3xx_nand_probe
,
2006 .remove
= pxa3xx_nand_remove
,
2009 module_platform_driver(pxa3xx_nand_driver
);
2011 MODULE_LICENSE("GPL");
2012 MODULE_DESCRIPTION("PXA3xx NAND controller driver");