// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * MTK NAND Flash controller driver.
 * Copyright (C) 2016 MediaTek Inc.
 * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
 *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of_device.h>

#include "mtk_ecc.h"
/* NAND controller register definition */
#define NFI_CNFG		(0x00)
#define		CNFG_AHB		BIT(0)
#define		CNFG_READ_EN		BIT(1)
#define		CNFG_DMA_BURST_EN	BIT(2)
#define		CNFG_BYTE_RW		BIT(6)
#define		CNFG_HW_ECC_EN		BIT(8)
#define		CNFG_AUTO_FMT_EN	BIT(9)
#define		CNFG_OP_CUST		(6 << 12)
#define NFI_PAGEFMT		(0x04)
#define		PAGEFMT_FDM_ECC_SHIFT	(12)
#define		PAGEFMT_FDM_SHIFT	(8)
#define		PAGEFMT_SEC_SEL_512	BIT(2)
#define		PAGEFMT_512_2K		(0)
#define		PAGEFMT_2K_4K		(1)
#define		PAGEFMT_4K_8K		(2)
#define		PAGEFMT_8K_16K		(3)
#define NFI_CON			(0x08)
#define		CON_FIFO_FLUSH		BIT(0)
#define		CON_NFI_RST		BIT(1)
#define		CON_BRD			BIT(8)	/* burst read */
#define		CON_BWR			BIT(9)	/* burst write */
#define		CON_SEC_SHIFT		(12)
/* Timing control register */
#define NFI_ACCCON		(0x0C)
#define NFI_INTR_EN		(0x10)
#define		INTR_AHB_DONE_EN	BIT(6)
#define NFI_INTR_STA		(0x14)
#define NFI_CMD			(0x20)
#define NFI_ADDRNOB		(0x30)
#define NFI_COLADDR		(0x34)
#define NFI_ROWADDR		(0x38)
#define NFI_STRDATA		(0x40)
#define		STAR_EN			(1)
#define		STAR_DE			(0)
#define NFI_CNRNB		(0x44)
#define NFI_DATAW		(0x50)
#define NFI_DATAR		(0x54)
#define NFI_PIO_DIRDY		(0x58)
#define		PIO_DI_RDY		(0x01)
#define NFI_STA			(0x60)
#define		STA_CMD			BIT(0)
#define		STA_ADDR		BIT(1)
#define		STA_BUSY		BIT(8)
#define		STA_EMP_PAGE		BIT(12)
#define		NFI_FSM_CUSTDATA	(0xe << 16)
#define		NFI_FSM_MASK		(0xf << 16)
#define NFI_ADDRCNTR		(0x70)
#define		CNTR_MASK		GENMASK(16, 12)
#define		ADDRCNTR_SEC_SHIFT	(12)
#define		ADDRCNTR_SEC(val) \
		(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
#define NFI_STRADDR		(0x80)
#define NFI_BYTELEN		(0x84)
#define NFI_CSEL		(0x90)
#define NFI_FDML(x)		(0xA0 + (x) * sizeof(u32) * 2)
#define NFI_FDMM(x)		(0xA4 + (x) * sizeof(u32) * 2)
#define NFI_FDM_MAX_SIZE	(8)
#define NFI_FDM_MIN_SIZE	(1)
#define NFI_DEBUG_CON1		(0x220)
#define		STROBE_MASK		GENMASK(4, 3)
#define		STROBE_SHIFT		(3)
#define		MAX_STROBE_DLY		(3)
#define NFI_MASTER_STA		(0x224)
#define		MASTER_STA_MASK		(0x0FFF)
#define NFI_EMPTY_THRESH	(0x23C)

#define MTK_NAME		"mtk-nand"
#define KB(x)			((x) * 1024UL)
#define MB(x)			(KB(x) * 1024UL)

#define MTK_TIMEOUT		(500000)
#define MTK_RESET_TIMEOUT	(1000000)
#define MTK_NAND_MAX_NSELS	(2)
#define MTK_NFC_MIN_SPARE	(16)
#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
	((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
	(tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
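/*
 * Illustrative example (not part of the original driver): given the shifts
 * above, ACCTIMING(3, 10, 5, 2, 2, 3, 3) packs tpoecs=3 into bits [31:28],
 * tprecs=10 into [27:22], tc2r=5 into [21:16], tw2r=2 into [15:12], twh=2
 * into [11:8], twst=3 into [7:4] and trlt=3 into [3:0] of NFI_ACCCON.
 */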
struct mtk_nfc_caps {
	const u8 *spare_size;
	u8 num_spare_size;
	u8 pageformat_spare_shift;
	u8 nfi_clk_div;
	u8 max_sector;
	u32 max_sector_size;
};

struct mtk_nfc_bad_mark_ctl {
	void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
	u32 sec;
	u32 pos;
};

/*
 * FDM: region used to store free OOB data
 */
struct mtk_nfc_fdm {
	u32 reg_size;
	u32 ecc_size;
};

struct mtk_nfc_nand_chip {
	struct list_head node;
	struct nand_chip nand;

	struct mtk_nfc_bad_mark_ctl bad_mark;
	struct mtk_nfc_fdm fdm;
	u32 spare_per_sector;

	int nsels;
	u8 sels[0];
	/* nothing after this field */
};

struct mtk_nfc_clk {
	struct clk *nfi_clk;
	struct clk *pad_clk;
};

struct mtk_nfc {
	struct nand_controller controller;
	struct mtk_ecc_config ecc_cfg;
	struct mtk_nfc_clk clk;
	struct mtk_ecc *ecc;

	struct device *dev;
	const struct mtk_nfc_caps *caps;
	void __iomem *regs;

	struct completion done;
	struct list_head chips;

	u8 *buffer;

	unsigned long assigned_cs;
};
/*
 * supported spare size of each IP.
 * order should be the same with the spare size bitfield definition of
 * register NFI_PAGEFMT
 */
static const u8 spare_size_mt2701[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64
};

static const u8 spare_size_mt2712[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
};

static const u8 spare_size_mt7622[] = {
	16, 26, 27, 28
};
static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
	return container_of(nand, struct mtk_nfc_nand_chip, nand);
}

static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
{
	return (u8 *)p + i * chip->ecc.size;
}

static inline u8 *oob_ptr(struct nand_chip *chip, int i)
{
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u8 *poi;

	/* map the sector's FDM data to free oob:
	 * the beginning of the oob area stores the FDM data of bad mark sectors
	 */
	if (i < mtk_nand->bad_mark.sec)
		poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
	else if (i == mtk_nand->bad_mark.sec)
		poi = chip->oob_poi;
	else
		poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;

	return poi;
}
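/*
 * Illustrative example (not part of the original driver): with 4 sectors,
 * fdm.reg_size = 8 and bad_mark.sec = 2, oob_poi holds the FDM of sector 2
 * first, then sector 0, sector 1 and sector 3, i.e. the bad-mark sector's
 * FDM always sits at offset 0 of the free OOB area.
 */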
static inline int mtk_data_len(struct nand_chip *chip)
{
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);

	return chip->ecc.size + mtk_nand->spare_per_sector;
}

static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	return nfc->buffer + i * mtk_data_len(chip);
}

static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
}
static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
{
	writel(val, nfc->regs + reg);
}

static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
{
	writew(val, nfc->regs + reg);
}

static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
{
	writeb(val, nfc->regs + reg);
}

static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
{
	return readl_relaxed(nfc->regs + reg);
}

static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
{
	return readw_relaxed(nfc->regs + reg);
}

static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
{
	return readb_relaxed(nfc->regs + reg);
}
static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	/* reset all registers and force the NFI master to terminate */
	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);

	/* wait for the master to finish the last transaction */
	ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
				 !(val & MASTER_STA_MASK), 50,
				 MTK_RESET_TIMEOUT);
	if (ret)
		dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
			 NFI_MASTER_STA, val);

	/* ensure any status register affected by the NFI master is reset */
	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
	nfi_writew(nfc, STAR_DE, NFI_STRDATA);
}
static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	nfi_writel(nfc, command, NFI_CMD);

	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
					!(val & STA_CMD), 10, MTK_TIMEOUT);
	if (ret) {
		dev_warn(dev, "nfi core timed out entering command mode\n");
		return -EIO;
	}

	return 0;
}

static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	nfi_writel(nfc, addr, NFI_COLADDR);
	nfi_writel(nfc, 0, NFI_ROWADDR);
	nfi_writew(nfc, 1, NFI_ADDRNOB);

	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
					!(val & STA_ADDR), 10, MTK_TIMEOUT);
	if (ret) {
		dev_warn(dev, "nfi core timed out entering address mode\n");
		return -EIO;
	}

	return 0;
}
static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	u32 fmt, spare, i;

	spare = mtk_nand->spare_per_sector;

	switch (mtd->writesize) {
	case 512:
		fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
		break;
	case KB(2):
		if (chip->ecc.size == 512)
			fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
		else
			fmt = PAGEFMT_512_2K;
		break;
	case KB(4):
		if (chip->ecc.size == 512)
			fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
		else
			fmt = PAGEFMT_2K_4K;
		break;
	case KB(8):
		if (chip->ecc.size == 512)
			fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
		else
			fmt = PAGEFMT_4K_8K;
		break;
	case KB(16):
		fmt = PAGEFMT_8K_16K;
		break;
	default:
		dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
		return -EINVAL;
	}

	/*
	 * the hardware will double the value for this eccsize, so we need to
	 * halve it
	 */
	if (chip->ecc.size == 1024)
		spare >>= 1;

	for (i = 0; i < nfc->caps->num_spare_size; i++) {
		if (nfc->caps->spare_size[i] == spare)
			break;
	}

	if (i == nfc->caps->num_spare_size) {
		dev_err(nfc->dev, "invalid spare size %d\n", spare);
		return -EINVAL;
	}

	fmt |= i << nfc->caps->pageformat_spare_shift;

	fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
	fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
	nfi_writel(nfc, fmt, NFI_PAGEFMT);

	nfc->ecc_cfg.strength = chip->ecc.strength;
	nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;

	return 0;
}
static void mtk_nfc_select_chip(struct nand_chip *nand, int chip)
{
	struct mtk_nfc *nfc = nand_get_controller_data(nand);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);

	if (chip < 0)
		return;

	mtk_nfc_hw_runtime_config(nand_to_mtd(nand));

	nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
}

static int mtk_nfc_dev_ready(struct nand_chip *nand)
{
	struct mtk_nfc *nfc = nand_get_controller_data(nand);

	if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
		return 0;

	return 1;
}
static void mtk_nfc_cmd_ctrl(struct nand_chip *chip, int dat,
			     unsigned int ctrl)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	if (ctrl & NAND_ALE) {
		mtk_nfc_send_address(nfc, dat);
	} else if (ctrl & NAND_CLE) {
		mtk_nfc_hw_reset(nfc);

		nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
		mtk_nfc_send_command(nfc, dat);
	}
}
static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
{
	int rc;
	u8 val;

	rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
				       val & PIO_DI_RDY, 10, MTK_TIMEOUT);
	if (rc < 0)
		dev_err(nfc->dev, "data not ready\n");
}
static inline u8 mtk_nfc_read_byte(struct nand_chip *chip)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	u32 reg;

	/* after each byte read, the NFI_STA reg is reset by the hardware */
	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
	if (reg != NFI_FSM_CUSTDATA) {
		reg = nfi_readw(nfc, NFI_CNFG);
		reg |= CNFG_BYTE_RW | CNFG_READ_EN;
		nfi_writew(nfc, reg, NFI_CNFG);

		/*
		 * set to max sector to allow the HW to continue reading over
		 * unaligned accesses
		 */
		reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
		nfi_writel(nfc, reg, NFI_CON);

		/* trigger to fetch data */
		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
	}

	mtk_nfc_wait_ioready(nfc);

	return nfi_readb(nfc, NFI_DATAR);
}
static void mtk_nfc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = mtk_nfc_read_byte(chip);
}

static void mtk_nfc_write_byte(struct nand_chip *chip, u8 byte)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	u32 reg;

	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;

	if (reg != NFI_FSM_CUSTDATA) {
		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
		nfi_writew(nfc, reg, NFI_CNFG);

		reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
		nfi_writel(nfc, reg, NFI_CON);

		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
	}

	mtk_nfc_wait_ioready(nfc);
	nfi_writeb(nfc, byte, NFI_DATAW);
}

static void mtk_nfc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		mtk_nfc_write_byte(chip, buf[i]);
}
static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
					const struct nand_data_interface *conf)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	const struct nand_sdr_timings *timings;
	u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
	u32 temp, tsel = 0;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return -ENOTSUPP;

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	rate = clk_get_rate(nfc->clk.nfi_clk);
	/* There is a frequency divider in some IPs */
	rate /= nfc->caps->nfi_clk_div;

	/* turn clock rate into KHZ */
	rate /= 1000;

	tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
	tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);

	tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
	tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);

	/* sdr interface has no tCR which means CE# low to RE# low */
	tc2r = 0;

	tw2r = timings->tWHR_min / 1000;
	tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
	tw2r = DIV_ROUND_UP(tw2r - 1, 2);

	twh = max(timings->tREH_min, timings->tWH_min) / 1000;
	twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;

	/* Calculate real WE#/RE# hold time in nanosecond */
	temp = (twh + 1) * 1000000 / rate;
	/* nanosecond to picosecond */
	temp *= 1000;

	/*
	 * WE# low level time should be expanded to meet WE# pulse time
	 * and WE# cycle time at the same time.
	 */
	if (temp < timings->tWC_min)
		twst = timings->tWC_min - temp;
	twst = max(timings->tWP_min, twst) / 1000;
	twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;

	/*
	 * RE# low level time should be expanded to meet RE# pulse time
	 * and RE# cycle time at the same time.
	 */
	if (temp < timings->tRC_min)
		trlt = timings->tRC_min - temp;
	trlt = max(trlt, timings->tRP_min) / 1000;
	trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;

	/* Calculate RE# pulse time in nanosecond. */
	temp = (trlt + 1) * 1000000 / rate;
	/* nanosecond to picosecond */
	temp *= 1000;
	/*
	 * If RE# access time is bigger than RE# pulse time,
	 * delay sampling data timing.
	 */
	if (temp < timings->tREA_max) {
		tsel = timings->tREA_max / 1000;
		tsel = DIV_ROUND_UP(tsel * rate, 1000000);
		if (tsel > MAX_STROBE_DLY) {
			trlt += tsel - MAX_STROBE_DLY;
			tsel = MAX_STROBE_DLY;
		}

		temp = nfi_readl(nfc, NFI_DEBUG_CON1);
		temp &= ~STROBE_MASK;
		temp |= tsel << STROBE_SHIFT;
		nfi_writel(nfc, temp, NFI_DEBUG_CON1);
	}

	/*
	 * ACCON: access timing control register
	 * -------------------------------------
	 * 31:28: tpoecs, minimum required time for CS post pulling down after
	 *        accessing the device
	 * 27:22: tprecs, minimum required time for CS pre pulling down before
	 *        accessing the device
	 * 21:16: tc2r, minimum required time from NCEB low to NREB low
	 * 15:12: tw2r, minimum required time from NWEB high to NREB low.
	 * 11:08: twh, write enable hold time
	 * 07:04: twst, write wait states
	 * 03:00: trlt, read wait states
	 */
	trlt = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
	nfi_writel(nfc, trlt, NFI_ACCCON);

	return 0;
}
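/*
 * Worked example for the conversions above (illustrative, assuming a 26 MHz
 * effective nfi_clk, i.e. rate = 26000 kHz): if max(tREH_min, tWH_min) is
 * 10000 ps, then twh = DIV_ROUND_UP(10 * 26000, 1000000) - 1 = 0, meaning a
 * single clock cycle (~38 ns) already satisfies the 10 ns hold requirement.
 */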
static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	int size = chip->ecc.size + mtk_nand->fdm.reg_size;

	nfc->ecc_cfg.mode = ECC_DMA_MODE;
	nfc->ecc_cfg.op = ECC_ENCODE;

	return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
}
static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
{
	/* nop */
}

static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
	u32 bad_pos = nand->bad_mark.pos;

	if (raw)
		bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
	else
		bad_pos += nand->bad_mark.sec * chip->ecc.size;

	swap(chip->oob_poi[0], buf[bad_pos]);
}
static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
				  u32 len, const u8 *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 start, end;
	int i, ret;

	start = offset / chip->ecc.size;
	end = DIV_ROUND_UP(offset + len, chip->ecc.size);

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	for (i = 0; i < chip->ecc.steps; i++) {
		memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
		       chip->ecc.size);

		if (start > i || i >= end)
			continue;

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);

		/* program the CRC back to the OOB */
		ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 i;

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	for (i = 0; i < chip->ecc.steps; i++) {
		if (buf)
			memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
			       chip->ecc.size);

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
	}
}
static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
				    u32 sectors)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 vall, valm;
	u8 *oobptr;
	int i, j;

	for (i = 0; i < sectors; i++) {
		oobptr = oob_ptr(chip, start + i);
		vall = nfi_readl(nfc, NFI_FDML(i));
		valm = nfi_readl(nfc, NFI_FDMM(i));

		for (j = 0; j < fdm->reg_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
	}
}
static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 vall, valm;
	u8 *oobptr;
	int i, j;

	for (i = 0; i < chip->ecc.steps; i++) {
		oobptr = oob_ptr(chip, i);
		vall = 0;
		valm = 0;
		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
						<< (j * 8);
			else
				valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
						<< ((j - 4) * 8);
		}
		nfi_writel(nfc, vall, NFI_FDML(i));
		nfi_writel(nfc, valm, NFI_FDMM(i));
	}
}
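/*
 * Illustrative note (not part of the original driver): each sector carries
 * up to 8 FDM bytes; bytes 0-3 travel through NFI_FDML(i) and bytes 4-7
 * through NFI_FDMM(i), least significant byte first. With fdm->reg_size = 6,
 * bytes 6 and 7 are therefore padded with 0xff in NFI_FDMM(i) on writes.
 */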
static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const u8 *buf, int page, int len)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct device *dev = nfc->dev;
	dma_addr_t addr;
	u32 reg;
	int ret;

	addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
	ret = dma_mapping_error(nfc->dev, addr);
	if (ret) {
		dev_err(nfc->dev, "dma mapping error\n");
		return -EINVAL;
	}

	reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
	nfi_writew(nfc, reg, NFI_CNFG);

	nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);

	init_completion(&nfc->done);

	reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
	nfi_writel(nfc, reg, NFI_CON);
	nfi_writew(nfc, STAR_EN, NFI_STRDATA);

	ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
	if (!ret) {
		dev_err(dev, "program ahb done timeout\n");
		nfi_writew(nfc, 0, NFI_INTR_EN);
		ret = -ETIMEDOUT;
		goto timeout;
	}

	ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
					ADDRCNTR_SEC(reg) >= chip->ecc.steps,
					10, MTK_TIMEOUT);
	if (ret)
		dev_err(dev, "hwecc write timeout\n");

timeout:

	dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
	nfi_writel(nfc, 0, NFI_CON);

	return ret;
}
static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			      const u8 *buf, int page, int raw)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	const u8 *bufpoi;
	size_t len;
	u32 reg;
	int ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	if (!raw) {
		/* OOB => FDM: from register, ECC: from HW */
		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
		nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);

		nfc->ecc_cfg.op = ECC_ENCODE;
		nfc->ecc_cfg.mode = ECC_NFI_MODE;
		ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
		if (ret) {
			/* clear NFI config */
			reg = nfi_readw(nfc, NFI_CNFG);
			reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
			nfi_writew(nfc, reg, NFI_CNFG);

			return ret;
		}

		memcpy(nfc->buffer, buf, mtd->writesize);
		mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
		bufpoi = nfc->buffer;

		/* write OOB into the FDM registers (OOB area in MTK NAND) */
		mtk_nfc_write_fdm(chip);
	} else {
		bufpoi = buf;
	}

	len = mtd->writesize + (raw ? mtd->oobsize : 0);
	ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);

	if (!raw)
		mtk_ecc_disable(nfc->ecc);

	if (ret < 0)
		return ret;

	return nand_prog_page_end_op(chip);
}
static int mtk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
				    int oob_on, int page)
{
	return mtk_nfc_write_page(nand_to_mtd(chip), chip, buf, page, 0);
}

static int mtk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
				  int oob_on, int pg)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	mtk_nfc_format_page(mtd, buf);
	return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
}

static int mtk_nfc_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
				       u32 data_len, const u8 *buf,
				       int oob_on, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	int ret;

	ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
	if (ret < 0)
		return ret;

	/* use the data in the private buffer (now with FDM and CRC) */
	return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
}

static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
{
	return mtk_nfc_write_page_raw(chip, NULL, 1, page);
}
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
				    u32 sectors)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_ecc_stats stats;
	u32 reg_size = mtk_nand->fdm.reg_size;
	int rc, i;

	rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
	if (rc) {
		memset(buf, 0xff, sectors * chip->ecc.size);
		for (i = 0; i < sectors; i++)
			memset(oob_ptr(chip, start + i), 0xff, reg_size);

		return 0;
	}

	mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
	mtd->ecc_stats.corrected += stats.corrected;
	mtd->ecc_stats.failed += stats.failed;

	return stats.bitflips;
}
static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
				u32 data_offs, u32 readlen,
				u8 *bufpoi, int page, int raw)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u32 spare = mtk_nand->spare_per_sector;
	u32 column, sectors, start, end, reg;
	dma_addr_t addr;
	int bitflips = 0;
	size_t len;
	u8 *buf;
	int rc;

	start = data_offs / chip->ecc.size;
	end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);

	sectors = end - start;
	column = start * (chip->ecc.size + spare);

	len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
	buf = bufpoi + start * chip->ecc.size;

	nand_read_page_op(chip, page, column, NULL, 0);

	addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
	rc = dma_mapping_error(nfc->dev, addr);
	if (rc) {
		dev_err(nfc->dev, "dma mapping error\n");

		return -EINVAL;
	}

	reg = nfi_readw(nfc, NFI_CNFG);
	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
	if (!raw) {
		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
		nfi_writew(nfc, reg, NFI_CNFG);

		nfc->ecc_cfg.mode = ECC_NFI_MODE;
		nfc->ecc_cfg.sectors = sectors;
		nfc->ecc_cfg.op = ECC_DECODE;
		rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
		if (rc) {
			dev_err(nfc->dev, "ecc enable\n");
			reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
				CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
			nfi_writew(nfc, reg, NFI_CNFG);
			dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

			return rc;
		}
	} else {
		nfi_writew(nfc, reg, NFI_CNFG);
	}

	nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);

	init_completion(&nfc->done);
	reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
	nfi_writel(nfc, reg, NFI_CON);
	nfi_writew(nfc, STAR_EN, NFI_STRDATA);

	rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
	if (!rc)
		dev_warn(nfc->dev, "read ahb/dma done timeout\n");

	rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
				       ADDRCNTR_SEC(reg) >= sectors, 10,
				       MTK_TIMEOUT);
	if (rc < 0) {
		dev_err(nfc->dev, "subpage done timeout\n");
		bitflips = -EIO;
	} else if (!raw) {
		rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
		bitflips = rc < 0 ? -ETIMEDOUT :
			   mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
		mtk_nfc_read_fdm(chip, start, sectors);
	}

	dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

	if (raw)
		goto done;

	mtk_ecc_disable(nfc->ecc);

	if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
		mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);

done:
	nfi_writel(nfc, 0, NFI_CON);

	return bitflips;
}
static int mtk_nfc_read_subpage_hwecc(struct nand_chip *chip, u32 off,
				      u32 len, u8 *p, int pg)
{
	return mtk_nfc_read_subpage(nand_to_mtd(chip), chip, off, len, p, pg,
				    0);
}

static int mtk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *p, int oob_on,
				   int pg)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
}

static int mtk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on,
				 int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	int i, ret;

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
				   page, 1);
	if (ret < 0)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		if (buf)
			memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
			       chip->ecc.size);
	}

	return ret;
}

static int mtk_nfc_read_oob_std(struct nand_chip *chip, int page)
{
	return mtk_nfc_read_page_raw(chip, NULL, 1, page);
}
static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
{
	/*
	 * CNRNB: nand ready/busy register
	 * -------------------------------
	 * 7:4: timeout register for polling the NAND busy/ready signal
	 * 0  : poll the status of the busy/ready signal after [7:4]*16 cycles.
	 */
	nfi_writew(nfc, 0xf1, NFI_CNRNB);
	nfi_writel(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);

	mtk_nfc_hw_reset(nfc);

	nfi_readl(nfc, NFI_INTR_STA);
	nfi_writel(nfc, 0, NFI_INTR_EN);
}
static irqreturn_t mtk_nfc_irq(int irq, void *id)
{
	struct mtk_nfc *nfc = id;
	u16 sta, ien;

	sta = nfi_readw(nfc, NFI_INTR_STA);
	ien = nfi_readw(nfc, NFI_INTR_EN);

	if (!(sta & ien))
		return IRQ_NONE;

	nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
	complete(&nfc->done);

	return IRQ_HANDLED;
}
static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk->nfi_clk);
	if (ret) {
		dev_err(dev, "failed to enable nfi clk\n");
		return ret;
	}

	ret = clk_prepare_enable(clk->pad_clk);
	if (ret) {
		dev_err(dev, "failed to enable pad clk\n");
		clk_disable_unprepare(clk->nfi_clk);
		return ret;
	}

	return 0;
}

static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
{
	clk_disable_unprepare(clk->nfi_clk);
	clk_disable_unprepare(clk->pad_clk);
}
static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oob_region)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 eccsteps;

	eccsteps = mtd->writesize / chip->ecc.size;

	if (section >= eccsteps)
		return -ERANGE;

	oob_region->length = fdm->reg_size - fdm->ecc_size;
	oob_region->offset = section * fdm->reg_size + fdm->ecc_size;

	return 0;
}

static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oob_region)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u32 eccsteps;

	if (section)
		return -ERANGE;

	eccsteps = mtd->writesize / chip->ecc.size;
	oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
	oob_region->length = mtd->oobsize - oob_region->offset;

	return 0;
}

static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
	.free = mtk_nfc_ooblayout_free,
	.ecc = mtk_nfc_ooblayout_ecc,
};
static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
	struct mtk_nfc *nfc = nand_get_controller_data(nand);
	u32 ecc_bytes;

	ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
				 mtk_ecc_get_parity_bits(nfc->ecc), 8);

	fdm->reg_size = chip->spare_per_sector - ecc_bytes;
	if (fdm->reg_size > NFI_FDM_MAX_SIZE)
		fdm->reg_size = NFI_FDM_MAX_SIZE;

	/* bad block mark storage */
	fdm->ecc_size = 1;
}
static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
				     struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);

	if (mtd->writesize == 512) {
		bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
	} else {
		bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
		bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
		bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
	}
}
static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(nand);
	const u8 *spare = nfc->caps->spare_size;
	u32 eccsteps, i, closest_spare = 0;

	eccsteps = mtd->writesize / nand->ecc.size;
	*sps = mtd->oobsize / eccsteps;

	if (nand->ecc.size == 1024)
		*sps >>= 1;

	if (*sps < MTK_NFC_MIN_SPARE)
		return -EINVAL;

	for (i = 0; i < nfc->caps->num_spare_size; i++) {
		if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
			closest_spare = i;
			if (*sps == spare[i])
				break;
		}
	}

	*sps = spare[closest_spare];

	if (nand->ecc.size == 1024)
		*sps <<= 1;

	return 0;
}
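/*
 * Worked example (illustrative): a 2KB page with 64 bytes of OOB and 512
 * byte ECC steps gives 64 / 4 = 16 spare bytes per sector, the first entry
 * of the spare size table. With 112 bytes of OOB it would be 112 / 4 = 28,
 * and the largest supported value not exceeding 28 is picked.
 */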
static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(nand);
	u32 spare;
	int free, ret;

	/* support only ecc hw mode */
	if (nand->ecc.mode != NAND_ECC_HW) {
		dev_err(dev, "ecc.mode not supported\n");
		return -EINVAL;
	}

	/* if optional dt settings not present */
	if (!nand->ecc.size || !nand->ecc.strength) {
		/* use datasheet requirements */
		nand->ecc.strength = nand->base.eccreq.strength;
		nand->ecc.size = nand->base.eccreq.step_size;

		/*
		 * align eccstrength and eccsize
		 * this controller only supports 512 and 1024 sizes
		 */
		if (nand->ecc.size < 1024) {
			if (mtd->writesize > 512 &&
			    nfc->caps->max_sector_size > 512) {
				nand->ecc.size = 1024;
				nand->ecc.strength <<= 1;
			} else {
				nand->ecc.size = 512;
			}
		} else {
			nand->ecc.size = 1024;
		}

		ret = mtk_nfc_set_spare_per_sector(&spare, mtd);
		if (ret)
			return ret;

		/* calculate oob bytes except ecc parity data */
		free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
			+ 7) >> 3;
		free = spare - free;

		/*
		 * enhance ecc strength if oob left is bigger than max FDM size
		 * or reduce ecc strength if oob size is not enough for ecc
		 * parity data.
		 */
		if (free > NFI_FDM_MAX_SIZE) {
			spare -= NFI_FDM_MAX_SIZE;
			nand->ecc.strength = (spare << 3) /
					     mtk_ecc_get_parity_bits(nfc->ecc);
		} else if (free < 0) {
			spare -= NFI_FDM_MIN_SIZE;
			nand->ecc.strength = (spare << 3) /
					     mtk_ecc_get_parity_bits(nfc->ecc);
		}
	}

	mtk_ecc_adjust_strength(nfc->ecc, &nand->ecc.strength);

	dev_info(dev, "eccsize %d eccstrength %d\n",
		 nand->ecc.size, nand->ecc.strength);

	return 0;
}
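/*
 * Illustrative note (not part of the original driver): per ECC step the OOB
 * budget is spare_per_sector bytes; parity consumes
 * DIV_ROUND_UP(strength * parity_bits, 8) of them and the remainder becomes
 * FDM. If that remainder exceeds NFI_FDM_MAX_SIZE the strength is raised to
 * use the surplus, and if it is negative the strength is reduced so that at
 * least NFI_FDM_MIN_SIZE byte remains for FDM.
 */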
static int mtk_nfc_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = mtd->dev.parent;
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	int len;
	int ret;

	if (chip->options & NAND_BUSWIDTH_16) {
		dev_err(dev, "16bits buswidth not supported");
		return -EINVAL;
	}

	/* store bbt magic in page, cause OOB is not protected */
	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	ret = mtk_nfc_ecc_init(dev, mtd);
	if (ret)
		return ret;

	ret = mtk_nfc_set_spare_per_sector(&mtk_nand->spare_per_sector, mtd);
	if (ret)
		return ret;

	mtk_nfc_set_fdm(&mtk_nand->fdm, mtd);
	mtk_nfc_set_bad_mark_ctl(&mtk_nand->bad_mark, mtd);

	len = mtd->writesize + mtd->oobsize;
	nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!nfc->buffer)
		return -ENOMEM;

	return 0;
}
static const struct nand_controller_ops mtk_nfc_controller_ops = {
	.attach_chip = mtk_nfc_attach_chip,
	.setup_data_interface = mtk_nfc_setup_data_interface,
};
static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
				  struct device_node *np)
{
	struct mtk_nfc_nand_chip *chip;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	int nsels;
	u32 tmp;
	int ret;
	int i;

	if (!of_get_property(np, "reg", &nsels))
		return -ENODEV;

	nsels /= sizeof(u32);
	if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
		dev_err(dev, "invalid reg property size %d\n", nsels);
		return -EINVAL;
	}

	chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
			    GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->nsels = nsels;
	for (i = 0; i < nsels; i++) {
		ret = of_property_read_u32_index(np, "reg", i, &tmp);
		if (ret) {
			dev_err(dev, "reg property failure : %d\n", ret);
			return ret;
		}

		if (tmp >= MTK_NAND_MAX_NSELS) {
			dev_err(dev, "invalid CS: %u\n", tmp);
			return -EINVAL;
		}

		if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
			dev_err(dev, "CS %u already assigned\n", tmp);
			return -EINVAL;
		}

		chip->sels[i] = tmp;
	}

	nand = &chip->nand;
	nand->controller = &nfc->controller;

	nand_set_flash_node(nand, np);
	nand_set_controller_data(nand, nfc);

	nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
	nand->legacy.dev_ready = mtk_nfc_dev_ready;
	nand->legacy.select_chip = mtk_nfc_select_chip;
	nand->legacy.write_byte = mtk_nfc_write_byte;
	nand->legacy.write_buf = mtk_nfc_write_buf;
	nand->legacy.read_byte = mtk_nfc_read_byte;
	nand->legacy.read_buf = mtk_nfc_read_buf;
	nand->legacy.cmd_ctrl = mtk_nfc_cmd_ctrl;

	/* set default mode in case dt entry is missing */
	nand->ecc.mode = NAND_ECC_HW;

	nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
	nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
	nand->ecc.write_page = mtk_nfc_write_page_hwecc;
	nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
	nand->ecc.write_oob = mtk_nfc_write_oob_std;

	nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
	nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
	nand->ecc.read_page = mtk_nfc_read_page_hwecc;
	nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
	nand->ecc.read_oob = mtk_nfc_read_oob_std;

	mtd = nand_to_mtd(nand);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;
	mtd->name = MTK_NAME;
	mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);

	mtk_nfc_hw_init(nfc);

	ret = nand_scan(nand, nsels);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "mtd parse partition error\n");
		nand_release(nand);
		return ret;
	}

	list_add_tail(&chip->node, &nfc->chips);

	return 0;
}
static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
	struct device_node *np = dev->of_node;
	struct device_node *nand_np;
	int ret;

	for_each_child_of_node(np, nand_np) {
		ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
		if (ret) {
			of_node_put(nand_np);
			return ret;
		}
	}

	return 0;
}
static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
	.spare_size = spare_size_mt2701,
	.num_spare_size = 16,
	.pageformat_spare_shift = 4,
	.max_sector_size = 1024,
};

static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
	.spare_size = spare_size_mt2712,
	.num_spare_size = 19,
	.pageformat_spare_shift = 16,
	.max_sector_size = 1024,
};

static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
	.spare_size = spare_size_mt7622,
	.num_spare_size = 4,
	.pageformat_spare_shift = 4,
	.max_sector_size = 512,
};

static const struct of_device_id mtk_nfc_id_table[] = {
	{
		.compatible = "mediatek,mt2701-nfc",
		.data = &mtk_nfc_caps_mt2701,
	}, {
		.compatible = "mediatek,mt2712-nfc",
		.data = &mtk_nfc_caps_mt2712,
	}, {
		.compatible = "mediatek,mt7622-nfc",
		.data = &mtk_nfc_caps_mt7622,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
static int mtk_nfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct mtk_nfc *nfc;
	struct resource *res;
	int ret, irq;

	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nand_controller_init(&nfc->controller);
	INIT_LIST_HEAD(&nfc->chips);
	nfc->controller.ops = &mtk_nfc_controller_ops;

	/* probe defer if not ready */
	nfc->ecc = of_mtk_ecc_get(np);
	if (IS_ERR(nfc->ecc))
		return PTR_ERR(nfc->ecc);

	nfc->caps = of_device_get_match_data(dev);
	nfc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nfc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(nfc->regs)) {
		ret = PTR_ERR(nfc->regs);
		goto release_ecc;
	}

	nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
	if (IS_ERR(nfc->clk.nfi_clk)) {
		dev_err(dev, "no clk\n");
		ret = PTR_ERR(nfc->clk.nfi_clk);
		goto release_ecc;
	}

	nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
	if (IS_ERR(nfc->clk.pad_clk)) {
		dev_err(dev, "no pad clk\n");
		ret = PTR_ERR(nfc->clk.pad_clk);
		goto release_ecc;
	}

	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
	if (ret)
		goto release_ecc;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -EINVAL;
		goto clk_disable;
	}

	ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
	if (ret) {
		dev_err(dev, "failed to request nfi irq\n");
		goto clk_disable;
	}

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "failed to set dma mask\n");
		goto clk_disable;
	}

	platform_set_drvdata(pdev, nfc);

	ret = mtk_nfc_nand_chips_init(dev, nfc);
	if (ret) {
		dev_err(dev, "failed to init nand chips\n");
		goto clk_disable;
	}

	return 0;

clk_disable:
	mtk_nfc_disable_clk(&nfc->clk);

release_ecc:
	mtk_ecc_release(nfc->ecc);

	return ret;
}
static int mtk_nfc_remove(struct platform_device *pdev)
{
	struct mtk_nfc *nfc = platform_get_drvdata(pdev);
	struct mtk_nfc_nand_chip *chip;

	while (!list_empty(&nfc->chips)) {
		chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
					node);
		nand_release(&chip->nand);
		list_del(&chip->node);
	}

	mtk_ecc_release(nfc->ecc);
	mtk_nfc_disable_clk(&nfc->clk);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mtk_nfc_suspend(struct device *dev)
{
	struct mtk_nfc *nfc = dev_get_drvdata(dev);

	mtk_nfc_disable_clk(&nfc->clk);

	return 0;
}

static int mtk_nfc_resume(struct device *dev)
{
	struct mtk_nfc *nfc = dev_get_drvdata(dev);
	struct mtk_nfc_nand_chip *chip;
	struct nand_chip *nand;
	int ret;
	u32 i;

	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
	if (ret)
		return ret;

	/* reset NAND chip if VCC was powered off */
	list_for_each_entry(chip, &nfc->chips, node) {
		nand = &chip->nand;
		for (i = 0; i < chip->nsels; i++)
			nand_reset(nand, i);
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
#endif
static struct platform_driver mtk_nfc_driver = {
	.probe = mtk_nfc_probe,
	.remove = mtk_nfc_remove,
	.driver = {
		.name = MTK_NAME,
		.of_match_table = mtk_nfc_id_table,
#ifdef CONFIG_PM_SLEEP
		.pm = &mtk_nfc_pm_ops,
#endif
	},
};

module_platform_driver(mtk_nfc_driver);

MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");