/*
 * MTK NAND Flash controller driver.
 * Copyright (C) 2016 MediaTek Inc.
 * Authors:  Xiaolei Li          <xiaolei.li@mediatek.com>
 *           Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "mtk_ecc.h"

/* NAND controller register definition */
#define NFI_CNFG		(0x00)
#define		CNFG_AHB		BIT(0)
#define		CNFG_READ_EN		BIT(1)
#define		CNFG_DMA_BURST_EN	BIT(2)
#define		CNFG_BYTE_RW		BIT(6)
#define		CNFG_HW_ECC_EN		BIT(8)
#define		CNFG_AUTO_FMT_EN	BIT(9)
#define		CNFG_OP_CUST		(6 << 12)
#define NFI_PAGEFMT		(0x04)
#define		PAGEFMT_FDM_ECC_SHIFT	(12)
#define		PAGEFMT_FDM_SHIFT	(8)
#define		PAGEFMT_SEC_SEL_512	BIT(2)
#define		PAGEFMT_512_2K		(0)
#define		PAGEFMT_2K_4K		(1)
#define		PAGEFMT_4K_8K		(2)
#define		PAGEFMT_8K_16K		(3)
#define NFI_CON			(0x08)
#define		CON_FIFO_FLUSH		BIT(0)
#define		CON_NFI_RST		BIT(1)
#define		CON_BRD			BIT(8)	/* burst read */
#define		CON_BWR			BIT(9)	/* burst write */
#define		CON_SEC_SHIFT		(12)
/* Timing control register */
#define NFI_ACCCON		(0x0C)
#define NFI_INTR_EN		(0x10)
#define		INTR_AHB_DONE_EN	BIT(6)
#define NFI_INTR_STA		(0x14)
#define NFI_CMD			(0x20)
#define NFI_ADDRNOB		(0x30)
#define NFI_COLADDR		(0x34)
#define NFI_ROWADDR		(0x38)
#define NFI_STRDATA		(0x40)
#define		STAR_EN			(1)
#define		STAR_DE			(0)
#define NFI_CNRNB		(0x44)
#define NFI_DATAW		(0x50)
#define NFI_DATAR		(0x54)
#define NFI_PIO_DIRDY		(0x58)
#define		PIO_DI_RDY		(0x01)
#define NFI_STA			(0x60)
#define		STA_CMD			BIT(0)
#define		STA_ADDR		BIT(1)
#define		STA_BUSY		BIT(8)
#define		STA_EMP_PAGE		BIT(12)
#define		NFI_FSM_CUSTDATA	(0xe << 16)
#define		NFI_FSM_MASK		(0xf << 16)
#define NFI_ADDRCNTR		(0x70)
#define		CNTR_MASK		GENMASK(16, 12)
#define		ADDRCNTR_SEC_SHIFT	(12)
#define		ADDRCNTR_SEC(val) \
		(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
#define NFI_STRADDR		(0x80)
#define NFI_BYTELEN		(0x84)
#define NFI_CSEL		(0x90)
#define NFI_FDML(x)		(0xA0 + (x) * sizeof(u32) * 2)
#define NFI_FDMM(x)		(0xA4 + (x) * sizeof(u32) * 2)
#define NFI_FDM_MAX_SIZE	(8)
#define NFI_FDM_MIN_SIZE	(1)
#define NFI_MASTER_STA		(0x224)
#define		MASTER_STA_MASK		(0x0FFF)
#define NFI_EMPTY_THRESH	(0x23C)

#define MTK_NAME		"mtk-nand"
#define KB(x)			((x) * 1024UL)
#define MB(x)			(KB(x) * 1024UL)

#define MTK_TIMEOUT		(500000)
#define MTK_RESET_TIMEOUT	(1000000)
#define MTK_MAX_SECTOR		(16)
#define MTK_NAND_MAX_NSELS	(2)
#define MTK_NFC_MIN_SPARE	(16)
#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
	((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
	(tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))

struct mtk_nfc_caps {
        const u8 *spare_size;
        u8 num_spare_size;
        u8 pageformat_spare_shift;
        u8 nfi_clk_div;
};

struct mtk_nfc_bad_mark_ctl {
        void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
        u32 sec;
        u32 pos;
};

/*
 * FDM: region used to store free OOB data
 */
struct mtk_nfc_fdm {
        u32 reg_size;
        u32 ecc_size;
};

struct mtk_nfc_nand_chip {
        struct list_head node;
        struct nand_chip nand;

        struct mtk_nfc_bad_mark_ctl bad_mark;
        struct mtk_nfc_fdm fdm;
        u32 spare_per_sector;

        int nsels;
        u8 sels[0];
        /* nothing after this field */
};

struct mtk_nfc_clk {
        struct clk *nfi_clk;
        struct clk *pad_clk;
};

struct mtk_nfc {
        struct nand_hw_control controller;
        struct mtk_ecc_config ecc_cfg;
        struct mtk_nfc_clk clk;
        struct mtk_ecc *ecc;

        struct device *dev;
        const struct mtk_nfc_caps *caps;
        void __iomem *regs;

        struct completion done;
        struct list_head chips;

        u8 *buffer;
};

/*
 * supported spare size of each IP.
 * order should be the same as the spare size bitfield definition of
 * register NFI_PAGEFMT
 */
static const u8 spare_size_mt2701[] = {
        16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64
};

static const u8 spare_size_mt2712[] = {
        16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
        74
};

static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
        return container_of(nand, struct mtk_nfc_nand_chip, nand);
}

static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
{
        return (u8 *)p + i * chip->ecc.size;
}

static inline u8 *oob_ptr(struct nand_chip *chip, int i)
{
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        u8 *poi;

        /* map the sector's FDM data to free oob:
         * the beginning of the oob area stores the FDM data of bad mark sectors
         */
        if (i < mtk_nand->bad_mark.sec)
                poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
        else if (i == mtk_nand->bad_mark.sec)
                poi = chip->oob_poi;
        else
                poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;

        return poi;
}

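/*
 * Note on the bounce-buffer layout used by the helpers below: the NFI
 * transfers a page as consecutive (data + spare) sectors, so sector i in
 * nfc->buffer starts at i * (ecc.size + spare_per_sector); the first
 * ecc.size bytes are payload and the remaining bytes hold that sector's
 * OOB/FDM data.
 */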
static inline int mtk_data_len(struct nand_chip *chip)
{
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);

        return chip->ecc.size + mtk_nand->spare_per_sector;
}

static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);

        return nfc->buffer + i * mtk_data_len(chip);
}

static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);

        return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
}

static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
{
        writel(val, nfc->regs + reg);
}

static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
{
        writew(val, nfc->regs + reg);
}

static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
{
        writeb(val, nfc->regs + reg);
}

static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
{
        return readl_relaxed(nfc->regs + reg);
}

static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
{
        return readw_relaxed(nfc->regs + reg);
}

static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
{
        return readb_relaxed(nfc->regs + reg);
}

static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
{
        struct device *dev = nfc->dev;
        u32 val;
        int ret;

        /* reset all registers and force the NFI master to terminate */
        nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);

        /* wait for the master to finish the last transaction */
        ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
                                 !(val & MASTER_STA_MASK), 50,
                                 MTK_RESET_TIMEOUT);
        if (ret)
                dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
                         NFI_MASTER_STA, val);

        /* ensure any status register affected by the NFI master is reset */
        nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
        nfi_writew(nfc, STAR_DE, NFI_STRDATA);
}

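/*
 * Command and address cycles are issued through NFI_CMD/NFI_COLADDR below;
 * each write is followed by polling NFI_STA until the controller has left
 * command/address mode.
 */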
static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
{
        struct device *dev = nfc->dev;
        u32 val;
        int ret;

        nfi_writel(nfc, command, NFI_CMD);

        ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
                                        !(val & STA_CMD), 10, MTK_TIMEOUT);
        if (ret) {
                dev_warn(dev, "nfi core timed out entering command mode\n");
                return -EIO;
        }

        return 0;
}

static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
{
        struct device *dev = nfc->dev;
        u32 val;
        int ret;

        nfi_writel(nfc, addr, NFI_COLADDR);
        nfi_writel(nfc, 0, NFI_ROWADDR);
        nfi_writew(nfc, 1, NFI_ADDRNOB);

        ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
                                        !(val & STA_ADDR), 10, MTK_TIMEOUT);
        if (ret) {
                dev_warn(dev, "nfi core timed out entering address mode\n");
                return -EIO;
        }

        return 0;
}

static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        u32 fmt, spare, i;

        spare = mtk_nand->spare_per_sector;

        switch (mtd->writesize) {
        case 512:
                fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
                break;
        case KB(2):
                if (chip->ecc.size == 512)
                        fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
                else
                        fmt = PAGEFMT_512_2K;
                break;
        case KB(4):
                if (chip->ecc.size == 512)
                        fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
                else
                        fmt = PAGEFMT_2K_4K;
                break;
        case KB(8):
                if (chip->ecc.size == 512)
                        fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
                else
                        fmt = PAGEFMT_4K_8K;
                break;
        case KB(16):
                fmt = PAGEFMT_8K_16K;
                break;
        default:
                dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
                return -EINVAL;
        }

        /*
         * the hardware will double the value for this eccsize, so we need to
         * halve it
         */
        if (chip->ecc.size == 1024)
                spare >>= 1;

        for (i = 0; i < nfc->caps->num_spare_size; i++) {
                if (nfc->caps->spare_size[i] == spare)
                        break;
        }

        if (i == nfc->caps->num_spare_size) {
                dev_err(nfc->dev, "invalid spare size %d\n", spare);
                return -EINVAL;
        }

        fmt |= i << nfc->caps->pageformat_spare_shift;

        fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
        fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
        nfi_writel(nfc, fmt, NFI_PAGEFMT);

        nfc->ecc_cfg.strength = chip->ecc.strength;
        nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;

        return 0;
}

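/*
 * Illustrative example for mtk_nfc_hw_runtime_config() (the geometry is an
 * assumption, not taken from a specific chip): a 2 KiB page with 512-byte
 * ECC sectors selects PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512, and a 16-byte
 * spare per sector matches index 0 of the spare_size table, which is then
 * shifted into the spare bitfield of NFI_PAGEFMT.
 */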
static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct mtk_nfc *nfc = nand_get_controller_data(nand);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);

        if (chip < 0)
                return;

        mtk_nfc_hw_runtime_config(mtd);

        nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
}

static int mtk_nfc_dev_ready(struct mtd_info *mtd)
{
        struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));

        if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
                return 0;

        return 1;
}

static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
        struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));

        if (ctrl & NAND_ALE) {
                mtk_nfc_send_address(nfc, dat);
        } else if (ctrl & NAND_CLE) {
                mtk_nfc_hw_reset(nfc);

                nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
                mtk_nfc_send_command(nfc, dat);
        }
}

static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
{
        int rc;
        u8 val;

        rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
                                       val & PIO_DI_RDY, 10, MTK_TIMEOUT);
        if (rc < 0)
                dev_err(nfc->dev, "data not ready\n");
}

static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        u32 reg;

        /* after each byte read, the NFI_STA reg is reset by the hardware */
        reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
        if (reg != NFI_FSM_CUSTDATA) {
                reg = nfi_readw(nfc, NFI_CNFG);
                reg |= CNFG_BYTE_RW | CNFG_READ_EN;
                nfi_writew(nfc, reg, NFI_CNFG);

                /*
                 * set to max sector to allow the HW to continue reading over
                 * unaligned accesses
                 */
                reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
                nfi_writel(nfc, reg, NFI_CON);

                /* trigger to fetch data */
                nfi_writew(nfc, STAR_EN, NFI_STRDATA);
        }

        mtk_nfc_wait_ioready(nfc);

        return nfi_readb(nfc, NFI_DATAR);
}

static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
        int i;

        for (i = 0; i < len; i++)
                buf[i] = mtk_nfc_read_byte(mtd);
}

static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
{
        struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
        u32 reg;

        reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;

        if (reg != NFI_FSM_CUSTDATA) {
                reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
                nfi_writew(nfc, reg, NFI_CNFG);

                reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
                nfi_writel(nfc, reg, NFI_CON);

                nfi_writew(nfc, STAR_EN, NFI_STRDATA);
        }

        mtk_nfc_wait_ioready(nfc);
        nfi_writeb(nfc, byte, NFI_DATAW);
}

static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
        int i;

        for (i = 0; i < len; i++)
                mtk_nfc_write_byte(mtd, buf[i]);
}

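/*
 * Worked example of the conversion done in mtk_nfc_setup_data_interface()
 * (the clock figure is an assumption for illustration only): with nfi_clk
 * at 124 MHz and nfi_clk_div = 1, rate = 124000 kHz. For tWP_min = 15000 ps,
 * twst = 15000 / 1000 = 15, then DIV_ROUND_UP(15 * 124000, 1000000) - 1 = 1,
 * i.e. one write wait state programmed into ACCCON bits 07:04.
 */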
static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
                                        const struct nand_data_interface *conf)
{
        struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
        const struct nand_sdr_timings *timings;
        u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;

        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
                return -ENOTSUPP;

        if (csline == NAND_DATA_IFACE_CHECK_ONLY)
                return 0;

        rate = clk_get_rate(nfc->clk.nfi_clk);
        /* There is a frequency divider in some IPs */
        rate /= nfc->caps->nfi_clk_div;

        /* turn clock rate into KHZ */
        rate /= 1000;

        tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
        tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);
        tpoecs &= 0xf;

        tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
        tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);
        tprecs &= 0x3f;

        /* sdr interface has no tCR which means CE# low to RE# low */
        tc2r = 0;

        tw2r = timings->tWHR_min / 1000;
        tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
        tw2r = DIV_ROUND_UP(tw2r - 1, 2);
        tw2r &= 0xf;

        twh = max(timings->tREH_min, timings->tWH_min) / 1000;
        twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
        twh &= 0xf;

        twst = timings->tWP_min / 1000;
        twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
        twst &= 0xf;

        trlt = max(timings->tREA_max, timings->tRP_min) / 1000;
        trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
        trlt &= 0xf;

        /*
         * ACCON: access timing control register
         * -------------------------------------
         * 31:28: tpoecs, minimum required time for CS post pulling down after
         *        accessing the device
         * 27:22: tprecs, minimum required time for CS pre pulling down before
         *        accessing the device
         * 21:16: tc2r, minimum required time from NCEB low to NREB low
         * 15:12: tw2r, minimum required time from NWEB high to NREB low.
         * 11:08: twh, write enable hold time
         * 07:04: twst, write wait states
         * 03:00: trlt, read wait states
         */
        trlt = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
        nfi_writel(nfc, trlt, NFI_ACCCON);

        return 0;
}

static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        int size = chip->ecc.size + mtk_nand->fdm.reg_size;

        nfc->ecc_cfg.mode = ECC_DMA_MODE;
        nfc->ecc_cfg.op = ECC_ENCODE;

        return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
}

static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
{
        /* nop */
}

static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
        u32 bad_pos = nand->bad_mark.pos;

        if (raw)
                bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
        else
                bad_pos += nand->bad_mark.sec * chip->ecc.size;

        swap(chip->oob_poi[0], buf[bad_pos]);
}

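/*
 * The factory bad block marker sits at the start of the physical OOB area,
 * which in the interleaved sector layout lands inside sector bad_mark.sec.
 * bm_swap() above exchanges that byte with oob_poi[0] so the marker survives
 * the page formatting done below; oob_ptr() maps the bad mark sector's FDM
 * bytes to the beginning of the OOB buffer for the same reason.
 */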
static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
                                  u32 len, const u8 *buf)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 start, end;
        int i, ret;

        start = offset / chip->ecc.size;
        end = DIV_ROUND_UP(offset + len, chip->ecc.size);

        memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
        for (i = 0; i < chip->ecc.steps; i++) {
                memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
                       chip->ecc.size);

                if (start > i || i >= end)
                        continue;

                if (i == mtk_nand->bad_mark.sec)
                        mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

                memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);

                /* program the CRC back to the OOB */
                ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
                if (ret < 0)
                        return ret;
        }

        return 0;
}

static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 i;

        memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
        for (i = 0; i < chip->ecc.steps; i++) {
                if (buf)
                        memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
                               chip->ecc.size);

                if (i == mtk_nand->bad_mark.sec)
                        mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

                memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
        }
}

static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
                                    u32 sectors)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 vall, valm;
        u8 *oobptr;
        int i, j;

        for (i = 0; i < sectors; i++) {
                oobptr = oob_ptr(chip, start + i);
                vall = nfi_readl(nfc, NFI_FDML(i));
                valm = nfi_readl(nfc, NFI_FDMM(i));

                for (j = 0; j < fdm->reg_size; j++)
                        oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
        }
}

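/*
 * mtk_nfc_write_fdm() mirrors mtk_nfc_read_fdm(): the first four FDM bytes
 * of each sector are packed little-endian into NFI_FDML(i) and the next
 * four into NFI_FDMM(i), padding with 0xff beyond fdm->reg_size.
 */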
static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 vall, valm;
        u8 *oobptr;
        int i, j;

        for (i = 0; i < chip->ecc.steps; i++) {
                oobptr = oob_ptr(chip, i);
                vall = 0;
                valm = 0;
                for (j = 0; j < 8; j++) {
                        if (j < 4)
                                vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
                                                << (j * 8);
                        else
                                valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
                                                << ((j - 4) * 8);
                }
                nfi_writel(nfc, vall, NFI_FDML(i));
                nfi_writel(nfc, valm, NFI_FDMM(i));
        }
}

static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                                 const u8 *buf, int page, int len)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct device *dev = nfc->dev;
        dma_addr_t addr;
        u32 reg;
        int ret;

        addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
        ret = dma_mapping_error(nfc->dev, addr);
        if (ret) {
                dev_err(nfc->dev, "dma mapping error\n");
                return -EINVAL;
        }

        reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
        nfi_writew(nfc, reg, NFI_CNFG);

        nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
        nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
        nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);

        init_completion(&nfc->done);

        reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
        nfi_writel(nfc, reg, NFI_CON);
        nfi_writew(nfc, STAR_EN, NFI_STRDATA);

        ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
        if (!ret) {
                dev_err(dev, "program ahb done timeout\n");
                nfi_writew(nfc, 0, NFI_INTR_EN);
                ret = -ETIMEDOUT;
                goto timeout;
        }

        ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
                                        ADDRCNTR_SEC(reg) >= chip->ecc.steps,
                                        10, MTK_TIMEOUT);
        if (ret)
                dev_err(dev, "hwecc write timeout\n");

timeout:
        dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
        nfi_writel(nfc, 0, NFI_CON);

        return ret;
}

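/*
 * mtk_nfc_do_write_page() above runs the DMA half of a program operation:
 * map the buffer, program the sector count, start address and AHB-done
 * interrupt, kick the burst write, then wait for the completion and for
 * NFI_ADDRCNTR to confirm that every sector was consumed by the controller.
 */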
static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                              const u8 *buf, int page, int raw)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        const u8 *bufpoi = buf;
        size_t len;
        u32 reg;
        int ret;

        if (!raw) {
                /* OOB => FDM: from register, ECC: from HW */
                reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
                nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);

                nfc->ecc_cfg.op = ECC_ENCODE;
                nfc->ecc_cfg.mode = ECC_NFI_MODE;
                ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
                if (ret) {
                        /* clear NFI config */
                        reg = nfi_readw(nfc, NFI_CNFG);
                        reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
                        nfi_writew(nfc, reg, NFI_CNFG);

                        return ret;
                }

                memcpy(nfc->buffer, buf, mtd->writesize);
                mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
                bufpoi = nfc->buffer;

                /* write OOB into the FDM registers (OOB area in MTK NAND) */
                mtk_nfc_write_fdm(chip);
        }

        len = mtd->writesize + (raw ? mtd->oobsize : 0);
        ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);

        if (!raw)
                mtk_ecc_disable(nfc->ecc);

        return ret;
}

static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
                                    struct nand_chip *chip, const u8 *buf,
                                    int oob_on, int page)
{
        return mtk_nfc_write_page(mtd, chip, buf, page, 0);
}

static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
                                  const u8 *buf, int oob_on, int pg)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);

        mtk_nfc_format_page(mtd, buf);
        return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
}

static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
                                       struct nand_chip *chip, u32 offset,
                                       u32 data_len, const u8 *buf,
                                       int oob_on, int page)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        int ret;

        ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
        if (ret < 0)
                return ret;

        /* use the data in the private buffer (now with FDM and CRC) */
        return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
}

static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
                                 int page)
{
        int ret;

        chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);

        ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
        if (ret < 0)
                return -EIO;

        chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
        ret = chip->waitfunc(mtd, chip);

        return ret & NAND_STATUS_FAIL ? -EIO : 0;
}

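/*
 * When NFI_STA reports STA_EMP_PAGE, the page was erased: the helper below
 * returns all-0xff data and FDM bytes and skips the ECC statistics, so an
 * empty page is not counted as uncorrectable.
 */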
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_ecc_stats stats;
        int rc, i;

        rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
        if (rc) {
                memset(buf, 0xff, sectors * chip->ecc.size);
                for (i = 0; i < sectors; i++)
                        memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
                return 0;
        }

        mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
        mtd->ecc_stats.corrected += stats.corrected;
        mtd->ecc_stats.failed += stats.failed;

        return stats.bitflips;
}

static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
                                u32 data_offs, u32 readlen,
                                u8 *bufpoi, int page, int raw)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        u32 spare = mtk_nand->spare_per_sector;
        u32 column, sectors, start, end, reg;
        dma_addr_t addr;
        int bitflips;
        size_t len;
        u8 *buf;
        int rc;

        start = data_offs / chip->ecc.size;
        end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);

        sectors = end - start;
        column = start * (chip->ecc.size + spare);

        len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
        buf = bufpoi + start * chip->ecc.size;

        chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);

        addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
        rc = dma_mapping_error(nfc->dev, addr);
        if (rc) {
                dev_err(nfc->dev, "dma mapping error\n");

                return -EINVAL;
        }

        reg = nfi_readw(nfc, NFI_CNFG);
        reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
        if (!raw) {
                reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
                nfi_writew(nfc, reg, NFI_CNFG);

                nfc->ecc_cfg.mode = ECC_NFI_MODE;
                nfc->ecc_cfg.sectors = sectors;
                nfc->ecc_cfg.op = ECC_DECODE;
                rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
                if (rc) {
                        dev_err(nfc->dev, "ecc enable\n");
                        /* clear NFI_CNFG */
                        reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
                                 CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
                        nfi_writew(nfc, reg, NFI_CNFG);
                        dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

                        return rc;
                }
        } else {
                nfi_writew(nfc, reg, NFI_CNFG);
        }

        nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
        nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
        nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);

        init_completion(&nfc->done);
        reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
        nfi_writel(nfc, reg, NFI_CON);
        nfi_writew(nfc, STAR_EN, NFI_STRDATA);

        rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
        if (!rc)
                dev_warn(nfc->dev, "read ahb/dma done timeout\n");

        rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
                                       ADDRCNTR_SEC(reg) >= sectors, 10,
                                       MTK_TIMEOUT);
        if (rc < 0) {
                dev_err(nfc->dev, "subpage done timeout\n");
                bitflips = -EIO;
        } else {
                bitflips = 0;
                if (!raw) {
                        rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
                        bitflips = rc < 0 ? -ETIMEDOUT :
                                   mtk_nfc_update_ecc_stats(mtd, buf, sectors);
                        mtk_nfc_read_fdm(chip, start, sectors);
                }
        }

        dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

        if (raw)
                goto done;

        mtk_ecc_disable(nfc->ecc);

        if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
                mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
done:
        nfi_writel(nfc, 0, NFI_CON);

        return bitflips;
}

static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
                                      struct nand_chip *chip, u32 off,
                                      u32 len, u8 *p, int pg)
{
        return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
}

static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
                                   struct nand_chip *chip, u8 *p,
                                   int oob_on, int pg)
{
        return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
}

static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
                                 u8 *buf, int oob_on, int page)
{
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        int i, ret;

        memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
        ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
                                   page, 1);
        if (ret < 0)
                return ret;

        for (i = 0; i < chip->ecc.steps; i++) {
                memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);

                if (i == mtk_nand->bad_mark.sec)
                        mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

                if (buf)
                        memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
                               chip->ecc.size);
        }

        return ret;
}

static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
                                int page)
{
        chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

        return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
}

static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
{
        /*
         * CNRNB: nand ready/busy register
         * -------------------------------
         * 7:4: timeout register for polling the NAND busy/ready signal
         * 0  : poll the status of the busy/ready signal after [7:4]*16 cycles.
         */
        nfi_writew(nfc, 0xf1, NFI_CNRNB);
        nfi_writel(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);

        mtk_nfc_hw_reset(nfc);

        nfi_readl(nfc, NFI_INTR_STA);
        nfi_writel(nfc, 0, NFI_INTR_EN);
}

static irqreturn_t mtk_nfc_irq(int irq, void *id)
{
        struct mtk_nfc *nfc = id;
        u16 sta, ien;

        sta = nfi_readw(nfc, NFI_INTR_STA);
        ien = nfi_readw(nfc, NFI_INTR_EN);

        if (!(sta & ien))
                return IRQ_NONE;

        nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
        complete(&nfc->done);

        return IRQ_HANDLED;
}

static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
{
        int ret;

        ret = clk_prepare_enable(clk->nfi_clk);
        if (ret) {
                dev_err(dev, "failed to enable nfi clk\n");
                return ret;
        }

        ret = clk_prepare_enable(clk->pad_clk);
        if (ret) {
                dev_err(dev, "failed to enable pad clk\n");
                clk_disable_unprepare(clk->nfi_clk);
                return ret;
        }

        return 0;
}

static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
{
        clk_disable_unprepare(clk->nfi_clk);
        clk_disable_unprepare(clk->pad_clk);
}

static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oob_region)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 eccsteps;

        eccsteps = mtd->writesize / chip->ecc.size;

        if (section >= eccsteps)
                return -ERANGE;

        oob_region->length = fdm->reg_size - fdm->ecc_size;
        oob_region->offset = section * fdm->reg_size + fdm->ecc_size;

        return 0;
}

static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oob_region)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        u32 eccsteps;

        if (section)
                return -ERANGE;

        eccsteps = mtd->writesize / chip->ecc.size;
        oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
        oob_region->length = mtd->oobsize - oob_region->offset;

        return 0;
}

static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
        .free = mtk_nfc_ooblayout_free,
        .ecc = mtk_nfc_ooblayout_ecc,
};

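/*
 * OOB layout summary: each sector contributes fdm->reg_size free bytes, of
 * which the first fdm->ecc_size (the bad block mark storage) is kept out of
 * the free region, so MTD sees reg_size - ecc_size free bytes per sector;
 * everything past reg_size * eccsteps is reported as ECC area.
 */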
static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
        u32 ecc_bytes;

        ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);

        fdm->reg_size = chip->spare_per_sector - ecc_bytes;
        if (fdm->reg_size > NFI_FDM_MAX_SIZE)
                fdm->reg_size = NFI_FDM_MAX_SIZE;

        /* bad block mark storage */
        fdm->ecc_size = 1;
}

static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
                                     struct mtd_info *mtd)
{
        struct nand_chip *nand = mtd_to_nand(mtd);

        if (mtd->writesize == 512) {
                bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
        } else {
                bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
                bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
                bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
        }
}

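/*
 * Illustrative example (page geometry assumed, not from a datasheet): a
 * 2048+64 page with 512-byte ECC sectors gives 64 / 4 = 16 spare bytes per
 * sector, which matches the first entry of the spare_size table, so *sps
 * stays at 16.
 */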
static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct mtk_nfc *nfc = nand_get_controller_data(nand);
        const u8 *spare = nfc->caps->spare_size;
        u32 eccsteps, i, closest_spare = 0;

        eccsteps = mtd->writesize / nand->ecc.size;
        *sps = mtd->oobsize / eccsteps;

        if (nand->ecc.size == 1024)
                *sps >>= 1;

        if (*sps < MTK_NFC_MIN_SPARE)
                return -EINVAL;

        for (i = 0; i < nfc->caps->num_spare_size; i++) {
                if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
                        closest_spare = i;
                        if (*sps == spare[i])
                                break;
                }
        }

        *sps = spare[closest_spare];

        if (nand->ecc.size == 1024)
                *sps <<= 1;

        return 0;
}

static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct mtk_nfc *nfc = nand_get_controller_data(nand);
        u32 spare;
        int free, ret;

        /* support only ecc hw mode */
        if (nand->ecc.mode != NAND_ECC_HW) {
                dev_err(dev, "ecc.mode not supported\n");
                return -EINVAL;
        }

        /* if optional dt settings not present */
        if (!nand->ecc.size || !nand->ecc.strength) {
                /* use datasheet requirements */
                nand->ecc.strength = nand->ecc_strength_ds;
                nand->ecc.size = nand->ecc_step_ds;

                /*
                 * align eccstrength and eccsize
                 * this controller only supports 512 and 1024 sizes
                 */
                if (nand->ecc.size < 1024) {
                        if (mtd->writesize > 512) {
                                nand->ecc.size = 1024;
                                nand->ecc.strength <<= 1;
                        } else {
                                nand->ecc.size = 512;
                        }
                } else {
                        nand->ecc.size = 1024;
                }

                ret = mtk_nfc_set_spare_per_sector(&spare, mtd);
                if (ret)
                        return ret;

                /* calculate oob bytes except ecc parity data */
                free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
                free = spare - free;

                /*
                 * enhance ecc strength if oob left is bigger than max FDM size
                 * or reduce ecc strength if oob size is not enough for ecc
                 * parity data.
                 */
                if (free > NFI_FDM_MAX_SIZE) {
                        spare -= NFI_FDM_MAX_SIZE;
                        nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
                } else if (free < 0) {
                        spare -= NFI_FDM_MIN_SIZE;
                        nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
                }
        }

        mtk_ecc_adjust_strength(nfc->ecc, &nand->ecc.strength);

        dev_info(dev, "eccsize %d eccstrength %d\n",
                 nand->ecc.size, nand->ecc.strength);

        return 0;
}

static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
                                  struct device_node *np)
{
        struct mtk_nfc_nand_chip *chip;
        struct nand_chip *nand;
        struct mtd_info *mtd;
        int nsels, len;
        u32 tmp;
        int ret;
        int i;

        if (!of_get_property(np, "reg", &nsels))
                return -ENODEV;

        nsels /= sizeof(u32);
        if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
                dev_err(dev, "invalid reg property size %d\n", nsels);
                return -EINVAL;
        }

        chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
                            GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->nsels = nsels;
        for (i = 0; i < nsels; i++) {
                ret = of_property_read_u32_index(np, "reg", i, &tmp);
                if (ret) {
                        dev_err(dev, "reg property failure : %d\n", ret);
                        return ret;
                }
                chip->sels[i] = tmp;
        }

        nand = &chip->nand;
        nand->controller = &nfc->controller;

        nand_set_flash_node(nand, np);
        nand_set_controller_data(nand, nfc);

        nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
        nand->dev_ready = mtk_nfc_dev_ready;
        nand->select_chip = mtk_nfc_select_chip;
        nand->write_byte = mtk_nfc_write_byte;
        nand->write_buf = mtk_nfc_write_buf;
        nand->read_byte = mtk_nfc_read_byte;
        nand->read_buf = mtk_nfc_read_buf;
        nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
        nand->setup_data_interface = mtk_nfc_setup_data_interface;

        /* set default mode in case dt entry is missing */
        nand->ecc.mode = NAND_ECC_HW;

        nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
        nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
        nand->ecc.write_page = mtk_nfc_write_page_hwecc;
        nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
        nand->ecc.write_oob = mtk_nfc_write_oob_std;

        nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
        nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
        nand->ecc.read_page = mtk_nfc_read_page_hwecc;
        nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
        nand->ecc.read_oob = mtk_nfc_read_oob_std;

        mtd = nand_to_mtd(nand);
        mtd->owner = THIS_MODULE;
        mtd->dev.parent = dev;
        mtd->name = MTK_NAME;
        mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);

        mtk_nfc_hw_init(nfc);

        ret = nand_scan_ident(mtd, nsels, NULL);
        if (ret)
                return ret;

        /* store bbt magic in page, cause OOB is not protected */
        if (nand->bbt_options & NAND_BBT_USE_FLASH)
                nand->bbt_options |= NAND_BBT_NO_OOB;

        ret = mtk_nfc_ecc_init(dev, mtd);
        if (ret)
                return ret;

        if (nand->options & NAND_BUSWIDTH_16) {
                dev_err(dev, "16bits buswidth not supported");
                return -EINVAL;
        }

        ret = mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
        if (ret)
                return ret;

        mtk_nfc_set_fdm(&chip->fdm, mtd);
        mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);

        len = mtd->writesize + mtd->oobsize;
        nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
        if (!nfc->buffer)
                return -ENOMEM;

        ret = nand_scan_tail(mtd);
        if (ret)
                return ret;

        ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
        if (ret) {
                dev_err(dev, "mtd parse partition error\n");
                nand_release(mtd);
                return ret;
        }

        list_add_tail(&chip->node, &nfc->chips);

        return 0;
}

static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
        struct device_node *np = dev->of_node;
        struct device_node *nand_np;
        int ret;

        for_each_child_of_node(np, nand_np) {
                ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
                if (ret) {
                        of_node_put(nand_np);
                        return ret;
                }
        }

        return 0;
}

static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
        .spare_size = spare_size_mt2701,
        .num_spare_size = 16,
        .pageformat_spare_shift = 4,
        .nfi_clk_div = 1,
};

static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
        .spare_size = spare_size_mt2712,
        .num_spare_size = 19,
        .pageformat_spare_shift = 16,
        .nfi_clk_div = 2,
};

static const struct of_device_id mtk_nfc_id_table[] = {
        {
                .compatible = "mediatek,mt2701-nfc",
                .data = &mtk_nfc_caps_mt2701,
        }, {
                .compatible = "mediatek,mt2712-nfc",
                .data = &mtk_nfc_caps_mt2712,
        },
        {}
};
MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);

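/*
 * Probe order below: acquire the shared ECC engine first (deferring through
 * of_mtk_ecc_get() if it is not ready yet), then map the NFI registers, get
 * and enable the nfi/pad clocks, install the interrupt handler, set a 32-bit
 * DMA mask and finally initialize every child NAND chip described in the
 * device tree.
 */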
static int mtk_nfc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct mtk_nfc *nfc;
        struct resource *res;
        const struct of_device_id *of_nfc_id = NULL;
        int ret, irq;

        nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
        if (!nfc)
                return -ENOMEM;

        spin_lock_init(&nfc->controller.lock);
        init_waitqueue_head(&nfc->controller.wq);
        INIT_LIST_HEAD(&nfc->chips);

        /* probe defer if not ready */
        nfc->ecc = of_mtk_ecc_get(np);
        if (IS_ERR(nfc->ecc))
                return PTR_ERR(nfc->ecc);
        else if (!nfc->ecc)
                return -ENODEV;

        nfc->dev = dev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        nfc->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(nfc->regs)) {
                ret = PTR_ERR(nfc->regs);
                goto release_ecc;
        }

        nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
        if (IS_ERR(nfc->clk.nfi_clk)) {
                dev_err(dev, "no clk\n");
                ret = PTR_ERR(nfc->clk.nfi_clk);
                goto release_ecc;
        }

        nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
        if (IS_ERR(nfc->clk.pad_clk)) {
                dev_err(dev, "no pad clk\n");
                ret = PTR_ERR(nfc->clk.pad_clk);
                goto release_ecc;
        }

        ret = mtk_nfc_enable_clk(dev, &nfc->clk);
        if (ret)
                goto release_ecc;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "no nfi irq resource\n");
                ret = -EINVAL;
                goto clk_disable;
        }

        ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
        if (ret) {
                dev_err(dev, "failed to request nfi irq\n");
                goto clk_disable;
        }

        ret = dma_set_mask(dev, DMA_BIT_MASK(32));
        if (ret) {
                dev_err(dev, "failed to set dma mask\n");
                goto clk_disable;
        }

        of_nfc_id = of_match_device(mtk_nfc_id_table, &pdev->dev);
        if (!of_nfc_id) {
                ret = -ENODEV;
                goto clk_disable;
        }

        nfc->caps = of_nfc_id->data;

        platform_set_drvdata(pdev, nfc);

        ret = mtk_nfc_nand_chips_init(dev, nfc);
        if (ret) {
                dev_err(dev, "failed to init nand chips\n");
                goto clk_disable;
        }

        return 0;

clk_disable:
        mtk_nfc_disable_clk(&nfc->clk);

release_ecc:
        mtk_ecc_release(nfc->ecc);

        return ret;
}

static int mtk_nfc_remove(struct platform_device *pdev)
{
        struct mtk_nfc *nfc = platform_get_drvdata(pdev);
        struct mtk_nfc_nand_chip *chip;

        while (!list_empty(&nfc->chips)) {
                chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
                                        node);
                nand_release(nand_to_mtd(&chip->nand));
                list_del(&chip->node);
        }

        mtk_ecc_release(nfc->ecc);
        mtk_nfc_disable_clk(&nfc->clk);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_nfc_suspend(struct device *dev)
{
        struct mtk_nfc *nfc = dev_get_drvdata(dev);

        mtk_nfc_disable_clk(&nfc->clk);

        return 0;
}

static int mtk_nfc_resume(struct device *dev)
{
        struct mtk_nfc *nfc = dev_get_drvdata(dev);
        struct mtk_nfc_nand_chip *chip;
        struct nand_chip *nand;
        struct mtd_info *mtd;
        int ret;
        u32 i;

        ret = mtk_nfc_enable_clk(dev, &nfc->clk);
        if (ret)
                return ret;

        /* reset NAND chip if VCC was powered off */
        list_for_each_entry(chip, &nfc->chips, node) {
                nand = &chip->nand;
                mtd = nand_to_mtd(nand);
                for (i = 0; i < chip->nsels; i++) {
                        nand->select_chip(mtd, i);
                        nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
                }
        }

        return 0;
}

static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
#endif

static struct platform_driver mtk_nfc_driver = {
        .probe = mtk_nfc_probe,
        .remove = mtk_nfc_remove,
        .driver = {
                .name = MTK_NAME,
                .of_match_table = mtk_nfc_id_table,
#ifdef CONFIG_PM_SLEEP
                .pm = &mtk_nfc_pm_ops,
#endif
        },
};

module_platform_driver(mtk_nfc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");