/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>

#include <mach/dma.h>
#include <plat/pxa3xx_nand.h>

#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)
#define NAND_STOP_DELAY		(2 * HZ/50)
#define PAGE_CHUNK_SIZE		(2048)

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_DBERR		(0x1 << 4)
#define NDSR_SBERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	__raw_writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	__raw_readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_DBERR	= -3,
	ERR_BBERR	= -4,
	ERR_SBERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

struct pxa3xx_nand_info {
	struct nand_chip	nand_chip;

	struct nand_hw_control	controller;
	struct platform_device	*pdev;
	struct pxa3xx_nand_cmdset *cmdset;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;

	unsigned int		buf_start;
	unsigned int		buf_count;

	struct mtd_info		*mtd;
	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	size_t			data_buff_size;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	uint32_t		reg_ndcr;

	/* saved column/page_addr during CMD_SEQIN */
	int			seqin_column;
	int			seqin_page_addr;

	/* relate to the command */
	unsigned int		state;

	int			use_ecc;	/* use HW ECC ? */
	int			use_dma;	/* use DMA ? */
	int			is_ready;

	unsigned int		page_size;	/* page size of attached chip */
	unsigned int		data_size;	/* data size in FIFO */
	int			retcode;
	struct completion	cmd_complete;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;

	/* timing calculated from setting */
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* calculated from pxa3xx_nand_flash data */
	size_t			oob_size;
	size_t			read_id_bytes;

	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

static int use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");

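/*
 * Usage note (illustrative only, assuming the module keeps the default name
 * derived from this file, i.e. "pxa3xx_nand"): DMA can be turned off at
 * load time with "modprobe pxa3xx_nand use_dma=0", or on the kernel command
 * line with "pxa3xx_nand.use_dma=0" when the driver is built in.
 */
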
/*
 * Default NAND flash controller configuration setup by the
 * bootloader. This configuration is used only when pdata->keep_config is set
 */
static struct pxa3xx_nand_cmdset default_cmdset = {
	.read1		= 0x3000,
	.read2		= 0x0050,
	.program	= 0x1080,
	.read_status	= 0x0070,
	.read_id	= 0x0090,
	.erase		= 0xD060,
	.reset		= 0x00FF,
	.lock		= 0x002A,
	.unlock		= 0x2423,
	.lock_status	= 0x007A,
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

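/*
 * Each entry below is { name, chip_id, pages per block, page size,
 * flash bus width, DFC bus width, number of blocks, timing }; the field
 * order is inferred from how pxa3xx_nand_config_flash() consumes the
 * table (e.g. 64 pages/block * 2048 B * 4096 blocks = 512 MiB).
 */
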
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};

/* Define a default flash type setting used for flash detection only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])

const char *mtd_names[] = {"pxa3xx_nand-0", NULL};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

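/*
 * Worked example (illustrative only, assuming a 156 MHz NAND controller
 * clock): ns2cycle(25, 156000000) = 25 * 156 / 1000 = 3 cycles. The
 * integer division truncates, so very short timings may round down to
 * fewer cycles than strictly required.
 */
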
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
				   const struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = info->page_size;
	if (!oob_enable) {
		info->oob_size = 0;
		return;
	}

	switch (info->page_size) {
	case 2048:
		info->oob_size = (info->use_ecc) ? 40 : 64;
		break;
	case 512:
		info->oob_size = (info->use_ecc) ? 8 : 16;
		break;
	}
}

/* NOTE: ND_RUN must be set first and the command buffer written
 * afterwards, otherwise the controller does not start.
 * We enable all the interrupts at the same time, and
 * let pxa3xx_nand_irq handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;
	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait for the RUN bit in NDCR to become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(10);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}

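/*
 * The low NDCR bits (NDCR_INT_MASK) are interrupt *mask* bits: clearing a
 * bit enables the corresponding interrupt and setting it masks it, which
 * is why enable_int() clears bits and disable_int() sets them.
 */
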
static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		if (info->oob_size > 0)
			__raw_writesl(info->mmio_base + NDDB, info->oob_buff,
					DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		if (info->oob_size > 0)
			__raw_readsl(info->mmio_base + NDDB, info->oob_buff,
					DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		printk(KERN_ERR "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}
}

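/*
 * A single PXA DMA descriptor covers the data area plus the OOB bytes in
 * one transfer; the length is rounded up to a multiple of 32 bytes to
 * match the DCMD_BURST32 burst size programmed below.
 */
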
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		printk(KERN_ERR "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}

static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}

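/*
 * Interrupt flow: on NDSR_WRCMDREQ the three prepared command words are
 * written back-to-back to the NDCB0 address (the controller latches them
 * into NDCB0/NDCB1/NDCB2 internally); NDSR_RDDREQ/NDSR_WRDREQ then start
 * a DMA or PIO data phase, and NDSR_CS0_CMDD completes the command.
 */
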
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0;

	status = nand_readl(info, NDSR);

	if (status & NDSR_DBERR)
		info->retcode = ERR_DBERR;
	if (status & NDSR_SBERR)
		info->retcode = ERR_SBERR;
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			handle_data_pio(info);
		}
	}
	if (status & NDSR_CS0_CMDD) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & NDSR_FLASH_RDY) {
		info->is_ready = 1;
		info->state = STATE_READY;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		complete(&info->cmd_complete);
NORMAL_IRQ_EXIT:
	return IRQ_HANDLED;
}

static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

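/*
 * NDCB0[23:21] (NDCB0_CMD_TYPE) selects the command class used below:
 * 0 = read, 1 = program, 2 = erase, 3 = read ID, 4 = status read,
 * 5 = reset; the low 16 bits carry the NAND opcode bytes (CMD1/CMD2).
 */
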
static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
		uint16_t column, int page_addr)
{
	uint16_t cmd;
	int addr_cycle, exec_cmd, ndcb0;
	struct mtd_info *mtd = info->mtd;

	ndcb0 = 0;
	addr_cycle = 0;
	exec_cmd = 1;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->use_ecc		= 0;
	info->is_ready		= 0;
	info->retcode		= ERR_NONE;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info);
		break;
	case NAND_CMD_SEQIN:
		exec_cmd = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	info->ndcb0 = ndcb0;
	addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles
			+ info->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		cmd = info->cmdset->read1;
		if (command == NAND_CMD_READOOB)
			info->buf_start = mtd->writesize + column;
		else
			info->buf_start = column;

		if (unlikely(info->page_size < PAGE_CHUNK_SIZE))
			info->ndcb0 |= NDCB0_CMD_TYPE(0)
					| addr_cycle
					| (cmd & NDCB0_CMD1_MASK);
		else
			info->ndcb0 |= NDCB0_CMD_TYPE(0)
					| NDCB0_DBC
					| addr_cycle
					| cmd;

	case NAND_CMD_SEQIN:
		/* small page addr setting */
		if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) {
			info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
					| (column & 0xFF);

			info->ndcb2 = 0;
		} else {
			info->ndcb1 = ((page_addr & 0xFFFF) << 16)
					| (column & 0xFFFF);

			if (page_addr & 0xFF0000)
				info->ndcb2 = (page_addr & 0xFF0000) >> 16;
			else
				info->ndcb2 = 0;
		}

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);

		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		cmd = info->cmdset->program;
		info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_AUTO_RS
				| NDCB0_ST_ROW_EN
				| NDCB0_DBC
				| cmd
				| addr_cycle;
		break;

	case NAND_CMD_READID:
		cmd = info->cmdset->read_id;
		info->buf_count = info->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| cmd;

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		cmd = info->cmdset->read_status;
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| cmd;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		cmd = info->cmdset->erase;
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| cmd;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		cmd = info->cmdset->reset;
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| cmd;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		printk(KERN_ERR "pxa3xx-nand: non-supported"
			" command %x\n", command);
		break;
	}

	return exec_cmd;
}

static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
		int column, int page_addr)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int ret, exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	exec_cmd = prepare_command_pool(info, command, column, page_addr);
	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		ret = wait_for_completion_timeout(&info->cmd_complete,
				CHIP_DELAY_TIMEOUT);
		if (!ret) {
			printk(KERN_ERR "Wait timed out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
		info->state = STATE_IDLE;
	}
}

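/*
 * With hardware ECC the controller computes and checks the ECC on the
 * fly (NDCR_ECC_EN is set in pxa3xx_nand_start()), so the page hooks
 * below only move data and OOB bytes through the driver buffer; the
 * result of the check is reported via info->retcode.
 */
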
static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int page)
{
	struct pxa3xx_nand_info *info = mtd->priv;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_SBERR) {
		switch (info->use_ecc) {
		case 1:
			mtd->ecc_stats.corrected++;
			break;
		case 0:
		default:
			break;
		}
	} else if (info->retcode == ERR_DBERR) {
		/*
		 * for a blank page (all 0xff), the HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, so ignore such double bit errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return 0;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just sent a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff + info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	return 0;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_info *info = mtd->priv;

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else {
			/*
			 * any error makes it return 0x01, which tells
			 * the caller that the erase or write failed
			 */
			return 0x01;
		}
	}

	return 0;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
		const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	uint32_t ndcr = 0x0; /* enable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512)
		return -EINVAL;

	if (f->flash_width != 16 && f->flash_width != 8)
		return -EINVAL;

	/* calculate flash information */
	info->cmdset = &default_cmdset;
	info->page_size = f->page_size;
	info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

	/* calculate addressing information */
	info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	if (f->num_blocks * f->page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(info, f->timing);
	return 0;
}

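/*
 * When pdata->keep_config is set, the NDCR and NDTRxCS0 values programmed
 * by the bootloader are read back below and reused, instead of being
 * derived from a pxa3xx_nand_flash table entry.
 */
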
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr = nand_readl(info, NDCR);
	info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	/* set info fields needed to read id */
	info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
	info->reg_ndcr = ndcr;
	info->cmdset = &default_cmdset;

	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);

	return 0;
}

/* the maximum possible buffer size for large page with OOB data
 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
 * data buffer and the DMA descriptor
 */
#define MAX_BUFF_SIZE	PAGE_SIZE

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_buff_size = MAX_BUFF_SIZE;
	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	return 0;
}

static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
	struct mtd_info *mtd = info->mtd;
	struct nand_chip *chip = mtd->priv;

	/* use the common timing to make a try */
	pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	if (info->is_ready)
		return 1;
	else
		return 0;
}

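/*
 * Scan flow: optionally reuse the bootloader configuration
 * (pdata->keep_config), otherwise probe with the default flash entry,
 * issue READID, and match the ID first against the platform-supplied
 * flash table and then against builtin_flash_types[] before handing the
 * result to nand_scan_ident()/nand_scan_tail().
 */
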
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} };
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	ret = pxa3xx_nand_sensing(info);
	if (!ret) {
		kfree(mtd);
		info->mtd = NULL;
		printk(KERN_INFO "There is no nand chip on cs 0!\n");

		return -EINVAL;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		printk(KERN_INFO "Detect a flash id %x\n", id);
	else {
		kfree(mtd);
		info->mtd = NULL;
		printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n");

		return -EINVAL;
	}

	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		kfree(mtd);
		info->mtd = NULL;
		printk(KERN_ERR "ERROR!! flash not defined!!!\n");

		return -EINVAL;
	}

	pxa3xx_nand_config_flash(info, f);
	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
KEEP_CONFIG:
	if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids))
		return -ENODEV;
	/* calculate addressing information */
	info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1;
	info->oob_buff = info->data_buff + mtd->writesize;
	if ((mtd->size >> chip->page_shift) > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;
	mtd->name = mtd_names[0];
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = f->page_size;

	chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
	chip->options |= NAND_NO_AUTOINCR;
	chip->options |= NAND_NO_READRDY;

	return nand_scan_tail(mtd);
}

struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq;

	mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
			GFP_KERNEL);
	if (!mtd) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return NULL;
	}

	info = (struct pxa3xx_nand_info *)(&mtd[1]);
	chip = (struct nand_chip *)(&mtd[1]);
	info->pdev = pdev;
	info->mtd = mtd;
	mtd->priv = info;
	mtd->owner = THIS_MODULE;

	chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
	chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
	chip->controller	= &info->controller;
	chip->waitfunc		= pxa3xx_nand_waitfunc;
	chip->select_chip	= pxa3xx_nand_select_chip;
	chip->dev_ready		= pxa3xx_nand_dev_ready;
	chip->cmdfunc		= pxa3xx_nand_cmdfunc;
	chip->read_word		= pxa3xx_nand_read_word;
	chip->read_byte		= pxa3xx_nand_read_byte;
	chip->read_buf		= pxa3xx_nand_read_buf;
	chip->write_buf		= pxa3xx_nand_write_buf;
	chip->verify_buf	= pxa3xx_nand_verify_buf;

	spin_lock_init(&chip->controller->lock);
	init_waitqueue_head(&chip->controller->wq);
	info->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		ret = PTR_ERR(info->clk);
		goto fail_free_mtd;
	}
	clk_enable(info->clk);

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for data DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_dat = r->start;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for command DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_cmd = r->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IO memory resource defined\n");
		ret = -ENODEV;
		goto fail_put_clk;
	}

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (r == NULL) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		ret = -EBUSY;
		goto fail_put_clk;
	}

	info->mmio_base = ioremap(r->start, resource_size(r));
	if (info->mmio_base == NULL) {
		dev_err(&pdev->dev, "ioremap() failed\n");
		ret = -ENODEV;
		goto fail_free_res;
	}
	info->mmio_phys = r->start;

	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		goto fail_free_io;

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
			  pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);

	return info;

fail_free_buf:
	free_irq(irq, info);
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);
fail_free_io:
	iounmap(info->mmio_base);
fail_free_res:
	release_mem_region(r->start, resource_size(r));
fail_put_clk:
	clk_disable(info->clk);
	clk_put(info->clk);
fail_free_mtd:
	kfree(mtd);
	return NULL;
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct mtd_info *mtd = info->mtd;
	struct resource *r;
	int irq;

	platform_set_drvdata(pdev, NULL);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_writecombine(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);

	iounmap(info->mmio_base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	clk_disable(info->clk);
	clk_put(info->clk);

	if (mtd) {
		mtd_device_unregister(mtd);
		kfree(mtd);
	}
	return 0;
}

static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	info = alloc_nand_resource(pdev);
	if (info == NULL)
		return -ENOMEM;

	if (pxa3xx_nand_scan(info->mtd)) {
		dev_err(&pdev->dev, "failed to scan nand\n");
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	if (mtd_has_cmdlinepart()) {
		const char *probes[] = { "cmdlinepart", NULL };
		struct mtd_partition *parts;
		int nr_parts;

		nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);

		if (nr_parts)
			return mtd_device_register(info->mtd, parts, nr_parts);
	}

	return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct mtd_info *mtd = info->mtd;

	if (info->state) {
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	return 0;
}

static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct mtd_info *mtd = info->mtd;

	nand_writel(info, NDTR0CS0, info->ndtr0cs0);
	nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	clk_enable(info->clk);

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

static int __init pxa3xx_nand_init(void)
{
	return platform_driver_register(&pxa3xx_nand_driver);
}
module_init(pxa3xx_nand_init);

static void __exit pxa3xx_nand_exit(void)
{
	platform_driver_unregister(&pxa3xx_nand_driver);
}
module_exit(pxa3xx_nand_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");