// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH FLCTL nand controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>
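
/*
 * OOB layout callbacks for the controller's 4-symbol ECC mode: each
 * 512-byte sector carries 10 ECC bytes in the spare area (see
 * flctl_chip_attach_chip() below), and these callbacks describe to the
 * MTD core where the ECC bytes and the remaining free bytes live.
 */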
static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 12;
	oobregion->length = 4;

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};

static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 6;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = section * 16;
	oobregion->length = 6;

	if (!section) {
		oobregion->offset += 2;
		oobregion->length -= 2;
	}

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
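
/*
 * Bad block marker descriptors used with the 4-symbol ECC layouts
 * above: the NAND core scans for an 0xff pattern at the given
 * spare-area offset instead of the default marker position.
 */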
static struct nand_bbt_descr flctl_4secc_smallpage = {
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};

static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}
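
/*
 * The wait_* helpers below busy-wait on controller status, polling at
 * one-microsecond intervals for at most LOOP_TIMEOUT_MAX iterations
 * and reporting through timeout_error() if the condition never holds.
 */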
static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}

static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}
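
/*
 * DMA is optional: it is only set up when the platform data supplies
 * shdma slave IDs for both FIFO0 directions, and any setup failure
 * releases the channels again so transfers fall back to PIO.
 */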
static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}
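
/*
 * Pack column/page into the FLADR address register (plus FLADR2 for
 * large-page chips over 128MB). The byte layout differs between
 * 512-byte and 2048-byte page devices.
 */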
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc.. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* bigger than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;

				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}

static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;

		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}
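
/*
 * Wait until one sector's worth of OOB data is readable, handling the
 * 4-symbol ECC results on the way: an uncorrectable error on an
 * all-0xff (erased) sector is ignored, otherwise the correction
 * patterns from the FL4ECCRESULTx registers are XORed into done_buff.
 */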
static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors or
	 * if the bottom part of the loop corrected the errors or marked them as
	 * uncorrectable and the controller is given time to push the data into
	 * the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
				"reading empty sector %d, ecc error ignored\n",
				sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}

static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FLECFIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}
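
/*
 * Transfer a buffer through FIFO0 via dmaengine. On success this
 * returns the positive remaining-jiffies value from
 * wait_for_completion_timeout(), so callers treat "> 0" as DMA success
 * and otherwise fall back to polled FIFO I/O.
 */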
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;
	uint32_t reg;
	int ret = 0;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (!dma_mapping_error(chan->device->dev, dma_addr))
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie)) {
			ret = dma_submit_error(cookie);
			dev_warn(&flctl->pdev->dev,
				 "DMA submit failed, falling back to PIO\n");
			goto out;
		}

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	ret =
	wait_for_completion_timeout(&flctl->dma_complete,
				msecs_to_jiffies(3000));

	if (ret <= 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret > 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
			goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}

static void write_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
			return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}
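
/*
 * Program FLCMNCR/FLCMDCR/FLCMCDR for one NAND command sequence.
 * flcmcdr_val packs up to two command bytes (e.g. READSTART in the
 * upper byte); flcmdcr_val selects the address byte count, the data
 * source and the transfer direction.
 */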
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048 bytes */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* in this case cmd is READ0, READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}
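
/*
 * Hardware-ECC page read: with ACM_SACCES_MODE and _4ECCCORRECT set,
 * the page is read one 512-byte sector at a time and the per-sector
 * result from read_ecfiforeg() is folded into the MTD ECC statistics.
 */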
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}

static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set transfer size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}
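
/*
 * Legacy cmdfunc implementation. Read data is staged in done_buff and
 * handed out later by flctl_read_byte()/flctl_read_buf(); write data
 * accumulates in the same buffer until NAND_CMD_PAGEPROG flushes it.
 */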
static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
			int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				pr_err("Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);

runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
	return;
}
static void flctl_select_chip(struct nand_chip *chip, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct nand_chip *chip)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;

	return data;
}

static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}
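
/*
 * Called once the chip has been identified: derive the address cycle
 * counts from the device and page size, then wire up either the
 * 4-symbol hardware ECC (10 bytes per 512-byte step) or software
 * Hamming ECC.
 */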
static int flctl_chip_attach_chip(struct nand_chip *chip)
{
	u64 targetsize = nanddev_target_size(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	/*
	 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
	 * Add the SEL_16BIT flag in flctl->flcmncr_base.
	 */
	if (chip->options & NAND_BUSWIDTH_16)
		flctl->flcmncr_base |= SEL_16BIT;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (targetsize > (32 << 20)) {
			/* bigger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (2 << 16)) {
			/* bigger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (targetsize > (128 << 20)) {
			/* bigger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (8 << 16)) {
			/* bigger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
	}

	return 0;
}

static const struct nand_controller_ops flctl_nand_controller_ops = {
	.attach_chip = flctl_chip_attach_chip,
};

static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

struct flctl_soc_config {
	unsigned long flcmncr_val;
	unsigned has_hwecc:1;
	unsigned use_holden:1;
};

static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};

static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
	  .data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);

static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
	const struct flctl_soc_config *config;
	struct sh_flctl_platform_data *pdata;

	config = of_device_get_match_data(dev);
	if (!config) {
		dev_err(dev, "%s: no OF configuration attached\n", __func__);
		return NULL;
	}

	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* set SoC specific options */
	pdata->flcmncr_val = config->flcmncr_val;
	pdata->has_hwecc = config->has_hwecc;
	pdata->use_holden = config->use_holden;

	return pdata;
}

static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* Set address of hardware control function */
	/* 20 us command delay time */
	nand->legacy.chip_delay = 20;

	nand->legacy.read_byte = flctl_read_byte;
	nand->legacy.write_buf = flctl_write_buf;
	nand->legacy.read_buf = flctl_read_buf;
	nand->legacy.select_chip = flctl_select_chip;
	nand->legacy.cmdfunc = flctl_cmdfunc;
	nand->legacy.set_features = nand_get_set_features_notsupp;
	nand->legacy.get_features = nand_get_set_features_notsupp;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
	ret = nand_scan(nand, 1);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
	if (ret)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand);
err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);
	struct nand_chip *chip = &flctl->chip;
	int ret;

	flctl_release_dma(flctl);
	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static struct platform_driver flctl_driver = {
	.remove		= flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};

module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");