/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/irq.h>
#include <linux/slab.h>

#include <plat/pxa3xx_nand.h>
#define CHIP_DELAY_TIMEOUT	(2 * HZ/10)
#define NAND_STOP_DELAY		(2 * HZ/50)
#define PAGE_CHUNK_SIZE		(2048)
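/*
 * CHIP_DELAY_TIMEOUT is the completion timeout handed to
 * wait_for_completion_timeout() (2 * HZ/10 = 200 ms worth of jiffies).
 * NAND_STOP_DELAY bounds the polling loop that waits for NDCR_ND_RUN to
 * clear in pxa3xx_nand_stop().  PAGE_CHUNK_SIZE is the threshold that
 * separates small-page from large-page command encoding in
 * prepare_command_pool().
 */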
/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */
#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_DBERR		(0x1 << 4)
#define NDSR_SBERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)
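/*
 * A command is described by three 32-bit words.  NDCB0 carries the first
 * and second command bytes (CMD1/CMD2), the command type, the number of
 * address cycles and the chip-select bit; NDCB1 and NDCB2 carry the
 * address bytes.  The three words are pushed to the controller through
 * the NDCB0 register in pxa3xx_nand_irq() once NDSR_WRCMDREQ is raised.
 */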
/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	__raw_writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	__raw_readl((info)->mmio_base + (off))
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR,
	ERR_DBERR,
	ERR_SBERR,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct pxa3xx_nand_cmdset *cmdset;
	struct mtd_info		*mtd;
	void			*info_data;

	/* page size of attached chip */
	unsigned int		page_size;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
	size_t			read_id_bytes;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;
};
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	*pdev;
	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete;

	unsigned int		buf_start;
	unsigned int		buf_count;

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;
	int			data_dma_ch;
	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;
	int			cs;
	int			is_ready;
	int			retcode;

	int			use_ecc;	/* use HW ECC ? */
	int			use_dma;	/* use DMA ? */

	unsigned int		page_size;	/* page size of attached chip */
	unsigned int		data_size;	/* data size in FIFO */
	unsigned int		oob_size;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
};
static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma,
		"enable DMA for data transfers to/from the NAND HW");
/*
 * Default NAND flash controller configuration setup by the
 * bootloader. This configuration is used only when pdata->keep_config is set
 */
static struct pxa3xx_nand_cmdset default_cmdset = {
	.read1		= 0x3000,
	.program	= 0x1080,
	.read_status	= 0x0070,
	.read_id	= 0x0090,
	.erase		= 0xD060,
	.reset		= 0x00FF,
	.lock_status	= 0x007A,
};
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
	{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
	{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
	{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
	{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
	{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
	{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
	{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
	{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};
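/*
 * Each entry above is, in order: name, 16-bit chip id, pages per block,
 * page size, flash bus width, controller (DFC) bus width, number of
 * blocks, and a pointer into timing[].  The "DEFAULT FLASH" entry is
 * only used while probing an unknown chip.
 */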
/* Define a default flash type setting used only for flash detection */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
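/*
 * ns2cycle() works in MHz to avoid 32-bit overflow: e.g. with a 156 MHz
 * controller clock, ns2cycle(40, clk) = 40 * 156 / 1000 = 6 cycles.
 * The NDTR0/NDTR1 field macros above then clamp the result to the width
 * of each bit field (3, 4 or 16 bits).
 */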
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	host->ndtr0cs0 = ndtr0;
	host->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
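/*
 * The computed values are cached in host->ndtr0cs0/ndtr1cs0 as well as
 * written to the hardware, so that pxa3xx_nand_cmdfunc() can reload them
 * whenever the active chip select changes.
 */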
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = host->page_size;
	if (!oob_enable) {
		info->oob_size = 0;
		return;
	}

	switch (host->page_size) {
	case 2048:
		info->oob_size = (info->use_ecc) ? 40 : 64;
		break;
	case 512:
		info->oob_size = (info->use_ecc) ? 8 : 16;
		break;
	}
}
/*
 * NOTE: ND_RUN must be set first and the command buffer written
 * afterwards, otherwise the controller does not start.
 * All interrupts are enabled at the same time and pxa3xx_nand_irq()
 * handles the rest of the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	struct pxa3xx_nand_host *host = info->host[info->cs];

	ndcr = host->reg_ndcr;
	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait for the RUN bit in NDCR to become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(10);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
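/*
 * The NDCR interrupt bits are mask bits: a set bit disables the
 * corresponding interrupt.  enable_int() therefore clears bits and
 * disable_int() sets them.
 */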
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		if (info->oob_size > 0)
			__raw_writesl(info->mmio_base + NDDB, info->oob_buff,
					DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		if (info->oob_size > 0)
			__raw_readsl(info->mmio_base + NDDB, info->oob_buff,
					DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		break;
	}
}
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		return;
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}
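/*
 * For writes the DMA source is the data buffer and the target is the
 * NDDB FIFO (DCMD_INCSRCADDR/DCMD_FLOWTRG); for reads the direction and
 * flow-control flags are swapped.  A single descriptor covers data plus
 * OOB, rounded up to the 32-byte burst size.
 */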
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0;
	unsigned int ready, cmd_done;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_DBERR)
		info->retcode = ERR_DBERR;
	if (status & NDSR_SBERR)
		info->retcode = ERR_SBERR;
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether to use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			handle_data_pio(info);
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->is_ready = 1;
		info->state = STATE_READY;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;
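		/*
		 * NDCB1 and NDCB2 are loaded by writing the NDCB0 address
		 * three times in a row; the controller advances to the next
		 * command-buffer word on each write.
		 */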
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		complete(&info->cmd_complete);
NORMAL_IRQ_EXIT:
	return IRQ_HANDLED;
}
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
		uint16_t column, int page_addr)
{
	uint16_t cmd;
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	exec_cmd = 1;

	/* reset data and oob column pointers before handling data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->oob_size = 0;
	info->is_ready = 0;
	info->retcode = ERR_NONE;
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info);
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
			+ host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		cmd = host->cmdset->read1;
		if (command == NAND_CMD_READOOB)
			info->buf_start = mtd->writesize + column;
		else
			info->buf_start = column;

		if (unlikely(host->page_size < PAGE_CHUNK_SIZE))
			info->ndcb0 |= NDCB0_CMD_TYPE(0)
					| addr_cycle
					| (cmd & NDCB0_CMD1_MASK);
		else
			info->ndcb0 |= NDCB0_CMD_TYPE(0)
					| NDCB0_DBC
					| addr_cycle
					| cmd;

	case NAND_CMD_SEQIN:
		/* small page address setting */
		if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
			info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
					| (column & 0xFF);
			info->ndcb2 = 0;
		} else {
			info->ndcb1 = ((page_addr & 0xFFFF) << 16)
					| (column & 0xFFFF);
			if (page_addr & 0xFF0000)
				info->ndcb2 = (page_addr & 0xFF0000) >> 16;
			else
				info->ndcb2 = 0;
		}

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		cmd = host->cmdset->program;
		info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_AUTO_RS
				| NDCB0_DBC
				| addr_cycle
				| cmd;
		break;

	case NAND_CMD_READID:
		cmd = host->cmdset->read_id;
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| cmd;
		break;

	case NAND_CMD_STATUS:
		cmd = host->cmdset->read_status;
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| cmd;
		break;

	case NAND_CMD_ERASE1:
		cmd = host->cmdset->erase;
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| cmd;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;
		break;

	case NAND_CMD_RESET:
		cmd = host->cmdset->reset;
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| cmd;
		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}
static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
				int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int ret, exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (host->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * Different NAND chips may be attached to different chip selects,
	 * so if the chip select has changed, reload the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, host->ndtr0cs0);
		nand_writel(info, NDTR1CS0, host->ndtr1cs0);
	}

	info->state = STATE_PREPARED;
	exec_cmd = prepare_command_pool(info, command, column, page_addr);
	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		ret = wait_for_completion_timeout(&info->cmd_complete,
				CHIP_DELAY_TIMEOUT);
		if (!ret) {
			dev_err(&info->pdev->dev, "wait time out!\n");
			/* Stop the state machine for the next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int page)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_SBERR) {
		switch (info->use_ecc) {
		case 1:
			mtd->ecc_stats.corrected++;
			break;
		case 0:
		default:
			break;
		}
	} else if (info->retcode == ERR_DBERR) {
		/*
		 * For a blank page (all 0xff) the HW calculates its ECC as
		 * 0, which differs from the ECC information stored in the
		 * OOB area, so ignore such double-bit errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return 0;
}
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	/* Has a new command just been sent? */
	if (info->buf_start < info->buf_count)
		retval = info->data_buff[info->buf_start++];

	return retval;
}
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff + info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}
static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	return 0;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		/*
		 * On any error, return 0x01, which tells the caller that
		 * the erase or write failed.
		 */
		return 0x01;
	}

	return 0;
}
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	struct pxa3xx_nand_host *host = info->host[info->cs];
	uint32_t ndcr = 0x0; /* enable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512) {
		dev_err(&pdev->dev, "only 2048 and 512 byte page sizes are supported\n");
		return -EINVAL;
	}

	if (f->flash_width != 16 && f->flash_width != 8) {
		dev_err(&pdev->dev, "only 8-bit and 16-bit bus widths are supported\n");
		return -EINVAL;
	}

	/* calculate flash information */
	host->cmdset = &default_cmdset;
	host->page_size = f->page_size;
	host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

	/* calculate addressing information */
	host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	if (f->num_blocks * f->page_per_block > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	host->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(host, f->timing);
	return 0;
}
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	/*
	 * Chip select 0 is hard-coded here because keep_config is not
	 * supported when more than one chip is attached to the controller.
	 */
	struct pxa3xx_nand_host *host = info->host[0];
	uint32_t ndcr = nand_readl(info, NDCR);

	if (ndcr & NDCR_PAGE_SZ) {
		host->page_size = 2048;
		host->read_id_bytes = 4;
	} else {
		host->page_size = 512;
		host->read_id_bytes = 2;
	}

	host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
	host->cmdset = &default_cmdset;

	host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	host->ndtr1cs0 = nand_readl(info, NDTR1CS0);

	return 0;
}
/* the maximum possible buffer size for large page with OOB data
 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
 * data buffer and the DMA descriptor
 */
#define MAX_BUFF_SIZE	PAGE_SIZE
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	return 0;
}
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
	struct mtd_info *mtd;
	int ret;

	mtd = info->host[info->cs]->mtd;
	/* use the common timing to make a try */
	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
	if (ret)
		return ret;

	pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	if (info->is_ready)
		return 1;
	else
		return 0;
}
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id;
	uint64_t chipsize;
	int i, ret, num;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	ret = pxa3xx_nand_sensing(info);
	if (!ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);
		return -EINVAL;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detected a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, the timing setup may be wrong!\n");
		return -EINVAL;
	}

	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in the default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		dev_err(&info->pdev->dev, "ERROR: flash not defined\n");
		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR: configure failed\n");
		return ret;
	}

	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
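	/*
	 * pxa3xx_flash_ids[] is a one-entry nand_flash_dev table built from
	 * the matched pxa3xx_nand_flash entry; passing it as "def" lets
	 * nand_scan_ident() below identify the chip without consulting the
	 * generic NAND id table.
	 */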
KEEP_CONFIG:
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = host->page_size;

	chip->options = NAND_NO_AUTOINCR;
	chip->options |= NAND_NO_READRDY;
	if (host->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;
	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	info->oob_buff = info->data_buff + mtd->writesize;
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	mtd->name = mtd_names[0];
	return nand_scan_tail(mtd);
}
static int alloc_nand_resource(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq, cs;

	pdata = pdev->dev.platform_data;
	info = kzalloc(sizeof(*info) + (sizeof(*mtd) +
		       sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
	if (!info) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	info->pdev = pdev;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = (struct mtd_info *)((unsigned int)&info[1] +
		      (sizeof(*mtd) + sizeof(*host)) * cs);
		chip = (struct nand_chip *)(&mtd[1]);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->mtd = mtd;
		host->cs = cs;
		host->info_data = info;
		mtd->priv = host;
		mtd->owner = THIS_MODULE;

		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
		chip->controller	= &info->controller;
		chip->waitfunc		= pxa3xx_nand_waitfunc;
		chip->select_chip	= pxa3xx_nand_select_chip;
		chip->cmdfunc		= pxa3xx_nand_cmdfunc;
		chip->read_word		= pxa3xx_nand_read_word;
		chip->read_byte		= pxa3xx_nand_read_byte;
		chip->read_buf		= pxa3xx_nand_read_buf;
		chip->write_buf		= pxa3xx_nand_write_buf;
		chip->verify_buf	= pxa3xx_nand_verify_buf;
	}

	spin_lock_init(&chip->controller->lock);
	init_waitqueue_head(&chip->controller->wq);
	info->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		ret = PTR_ERR(info->clk);
		goto fail_free_mtd;
	}
	clk_enable(info->clk);

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for data DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_dat = r->start;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for command DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_cmd = r->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IO memory resource defined\n");
		ret = -ENODEV;
		goto fail_put_clk;
	}

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (r == NULL) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		ret = -EBUSY;
		goto fail_put_clk;
	}

	info->mmio_base = ioremap(r->start, resource_size(r));
	if (info->mmio_base == NULL) {
		dev_err(&pdev->dev, "ioremap() failed\n");
		ret = -ENODEV;
		goto fail_free_res;
	}
	info->mmio_phys = r->start;

	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		goto fail_free_io;

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
			  pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);

	return 0;

fail_free_buf:
	free_irq(irq, info);
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);
fail_free_io:
	iounmap(info->mmio_base);
fail_free_res:
	release_mem_region(r->start, resource_size(r));
fail_put_clk:
	clk_disable(info->clk);
	clk_put(info->clk);
fail_free_mtd:
	kfree(info);
	return ret;
}
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct resource *r;
	int irq, cs;

	if (!info)
		return 0;

	pdata = pdev->dev.platform_data;
	platform_set_drvdata(pdev, NULL);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);

	iounmap(info->mmio_base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	clk_disable(info->clk);
	clk_put(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);
	kfree(info);
	return 0;
}
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		info->cs = cs;
		ret = pxa3xx_nand_scan(info->host[cs]->mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0,
				pdata->parts[cs], pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}
#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = pdev->dev.platform_data;
	if (info->state) {
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd->suspend(mtd);
	}

	return 0;
}
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = pdev->dev.platform_data;
	/* We don't want to handle interrupts without calling the mtd routines */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Set the chip select to an invalid value so that the driver
	 * reloads the timing for the current chip select at the
	 * beginning of cmdfunc().
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, NDSR is updated to 0x1800 when nand_clk is
	 * disabled/enabled.  To keep this from confusing the driver's
	 * state machine, clear all status bits before resuming.
	 */
	nand_writel(info, NDSR, NDSR_MASK);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd->resume(mtd);
	}

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif
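/*
 * Without CONFIG_PM the platform driver simply gets NULL suspend/resume
 * hooks, so the PM callbacks above compile away entirely.
 */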
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");