// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH FLCTL nand controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

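/*
 * OOB layout helpers for the hardware 4-symbol ECC mode. The controller
 * stores 10 ECC bytes per 512-byte sector (see flctl_chip_attach_chip());
 * the ecc/free callbacks below describe where those bytes and the
 * remaining spare bytes live in the OOB area.
 */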
static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 12;
	oobregion->length = 4;

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};

static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 6;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = section * 16;
	oobregion->length = 6;

	if (!section) {
		oobregion->offset += 2;
		oobregion->length -= 2;
	}

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr flctl_4secc_smallpage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};

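/* Drain both controller FIFOs by pulsing the AC1CLR/AC0CLR bits. */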
static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}

static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}

static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}

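/*
 * DMA is optional: it is only set up when the platform data provides
 * valid slave IDs for both FIFO0 directions. Any failure below simply
 * releases the channels again and the driver falls back to PIO.
 */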
static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}

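/*
 * Program FLADR (and FLADR2 for large devices needing a fifth address
 * byte) from the column/page pair. A column of -1 means an erase (page
 * address only); a page address of -1 means no address cycle at all.
 */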
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc.. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* bigger than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;

				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}

static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;

		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

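/*
 * Poll the read-ECC FIFO for one sector. Returns FL_SUCCESS when the
 * sector's OOB data can be read out, FL_REPAIRABLE after applying the
 * controller's correction patterns to done_buff, FL_ERROR for an
 * uncorrectable (non-empty) sector, and FL_TIMEOUT if the controller
 * never becomes ready.
 */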
static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors or
	 * if the bottom part of the loop corrected the errors or marked them as
	 * uncorrectable and the controller is given time to push the data into
	 * the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
					"reading empty sector %d, ecc error ignored\n",
					sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}

static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FLECFIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

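/*
 * Transfer one FIFO0 buffer by DMA. Returns 0 on success; on any
 * mapping, submission or completion failure the caller retries the
 * same buffer with PIO (see read_fiforeg() and write_ec_fiforeg()).
 */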
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;
	uint32_t reg;
	int ret = 0;
	unsigned long time_left;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (!dma_mapping_error(chan->device->dev, dma_addr))
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie)) {
			ret = dma_submit_error(cookie);
			dev_warn(&flctl->pdev->dev,
				 "DMA submit failed, falling back to PIO\n");
			goto out;
		}

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	time_left = wait_for_completion_timeout(&flctl->dma_complete,
						msecs_to_jiffies(3000));

	if (time_left == 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret == 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
		!flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
			goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}

static void write_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
		!flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
			return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}

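/*
 * Translate a raw NAND command into FLCMNCR/FLCMDCR/FLCMCDR settings:
 * number of address bytes, command source, read/write direction and
 * 8/16-bit bus selection.
 */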
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048byte */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* This case is that cmd is READ0 or READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}

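/*
 * Read a full page in hardware-ECC sector access mode: 512 data bytes
 * plus 16 OOB bytes per sector, with the 4-symbol ECC engine correcting
 * data in done_buff as each sector's ECC FIFO is drained.
 */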
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			break;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}

static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set write size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}

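/*
 * Legacy cmdfunc implementation. Commands that read data fall through
 * to read_normal_exit, which programs the transfer size and drains the
 * data FIFO into done_buff; the buffer is then consumed by
 * flctl_read_byte() and flctl_read_buf().
 */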
static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
			int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				pr_err("Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);

runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
}

static void flctl_select_chip(struct nand_chip *chip, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct nand_chip *chip)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;

	return data;
}

static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}

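/*
 * Called once nand_scan_ident() has identified the chip: derive the
 * address cycle counts from the page size and target size, then wire
 * up either the hardware 4-symbol ECC or software Hamming ECC.
 */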
static int flctl_chip_attach_chip(struct nand_chip *chip)
{
	u64 targetsize = nanddev_target_size(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	/*
	 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
	 * Add the SEL_16BIT flag in flctl->flcmncr_base.
	 */
	if (chip->options & NAND_BUSWIDTH_16)
		flctl->flcmncr_base |= SEL_16BIT;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (targetsize > (32 << 20)) {
			/* bigger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (2 << 16)) {
			/* bigger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (targetsize > (128 << 20)) {
			/* bigger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (8 << 16)) {
			/* bigger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
	}

	return 0;
}

static const struct nand_controller_ops flctl_nand_controller_ops = {
	.attach_chip = flctl_chip_attach_chip,
};

static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

struct flctl_soc_config {
	unsigned long flcmncr_val;
	unsigned has_hwecc:1;
	unsigned use_holden:1;
};

static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};

static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
	  .data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);

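/*
 * Build platform data from the matched OF configuration; returns NULL
 * if no configuration is attached or the allocation fails, which makes
 * the probe below bail out with "no setup data defined".
 */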
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
	const struct flctl_soc_config *config;
	struct sh_flctl_platform_data *pdata;

	config = of_device_get_match_data(dev);
	if (!config) {
		dev_err(dev, "%s: no OF configuration attached\n", __func__);
		return NULL;
	}

	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* set SoC specific options */
	pdata->flcmncr_val = config->flcmncr_val;
	pdata->has_hwecc = config->has_hwecc;
	pdata->use_holden = config->use_holden;

	return pdata;
}

static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* Set address of hardware control function */
	/* 20 us command delay time */
	nand->legacy.chip_delay = 20;

	nand->legacy.read_byte = flctl_read_byte;
	nand->legacy.write_buf = flctl_write_buf;
	nand->legacy.read_buf = flctl_read_buf;
	nand->legacy.select_chip = flctl_select_chip;
	nand->legacy.cmdfunc = flctl_cmdfunc;
	nand->legacy.set_features = nand_get_set_features_notsupp;
	nand->legacy.get_features = nand_get_set_features_notsupp;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
	ret = nand_scan(nand, 1);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
	if (ret)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand);
err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static void flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);
	struct nand_chip *chip = &flctl->chip;
	int ret;

	flctl_release_dma(flctl);
	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	pm_runtime_disable(&pdev->dev);
}

static struct platform_driver flctl_driver = {
	.probe		= flctl_probe,
	.remove		= flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.of_match_table = of_flctl_match,
	},
};

module_platform_driver(flctl_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");