// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
 * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
 * Copyright (C) 2012 Avionic Design GmbH
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#define COMMAND					0x00
#define   COMMAND_GO				BIT(31)
#define   COMMAND_CLE				BIT(30)
#define   COMMAND_ALE				BIT(29)
#define   COMMAND_PIO				BIT(28)
#define   COMMAND_TX				BIT(27)
#define   COMMAND_RX				BIT(26)
#define   COMMAND_SEC_CMD			BIT(25)
#define   COMMAND_AFT_DAT			BIT(24)
#define   COMMAND_TRANS_SIZE(size)		((((size) - 1) & 0xf) << 20)
#define   COMMAND_A_VALID			BIT(19)
#define   COMMAND_B_VALID			BIT(18)
#define   COMMAND_RD_STATUS_CHK		BIT(17)
#define   COMMAND_RBSY_CHK			BIT(16)
#define   COMMAND_CE(x)				BIT(8 + ((x) & 0x7))
#define   COMMAND_CLE_SIZE(size)		((((size) - 1) & 0x3) << 4)
#define   COMMAND_ALE_SIZE(size)		((((size) - 1) & 0xf) << 0)
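
/*
 * A complete bus cycle is described by a single COMMAND word: the CLE/ALE/
 * TX/RX flags select which phases the cycle contains, and writing the word
 * with COMMAND_GO set starts it (see tegra_nand_cmd() and
 * tegra_nand_page_xfer() below).
 */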

#define STATUS					0x04

#define ISR					0x08
#define   ISR_CORRFAIL_ERR			BIT(24)
#define   ISR_UND				BIT(7)
#define   ISR_OVR				BIT(6)
#define   ISR_CMD_DONE				BIT(5)
#define   ISR_ECC_ERR				BIT(4)

#define IER					0x0c
#define   IER_ERR_TRIG_VAL(x)			(((x) & 0xf) << 16)
#define   IER_UND				BIT(7)
#define   IER_OVR				BIT(6)
#define   IER_CMD_DONE				BIT(5)
#define   IER_ECC_ERR				BIT(4)
#define   IER_GIE				BIT(0)

#define CONFIG					0x10
#define   CONFIG_HW_ECC				BIT(31)
#define   CONFIG_ECC_SEL			BIT(30)
#define   CONFIG_ERR_COR			BIT(29)
#define   CONFIG_PIPE_EN			BIT(28)
#define   CONFIG_TVAL_4				(0 << 24)
#define   CONFIG_TVAL_6				(1 << 24)
#define   CONFIG_TVAL_8				(2 << 24)
#define   CONFIG_SKIP_SPARE			BIT(23)
#define   CONFIG_BUS_WIDTH_16			BIT(21)
#define   CONFIG_COM_BSY			BIT(20)
#define   CONFIG_PS_256				(0 << 16)
#define   CONFIG_PS_512				(1 << 16)
#define   CONFIG_PS_1024			(2 << 16)
#define   CONFIG_PS_2048			(3 << 16)
#define   CONFIG_PS_4096			(4 << 16)
#define   CONFIG_SKIP_SPARE_SIZE_4		(0 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_8		(1 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_12		(2 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_16		(3 << 14)
#define   CONFIG_TAG_BYTE_SIZE(x)		((x) & 0xff)

#define TIMING_1				0x14
#define   TIMING_TRP_RESP(x)			(((x) & 0xf) << 28)
#define   TIMING_TWB(x)				(((x) & 0xf) << 24)
#define   TIMING_TCR_TAR_TRR(x)			(((x) & 0xf) << 20)
#define   TIMING_TWHR(x)			(((x) & 0xf) << 16)
#define   TIMING_TCS(x)				(((x) & 0x3) << 14)
#define   TIMING_TWH(x)				(((x) & 0x3) << 12)
#define   TIMING_TWP(x)				(((x) & 0xf) << 8)
#define   TIMING_TRH(x)				(((x) & 0x3) << 4)
#define   TIMING_TRP(x)				(((x) & 0xf) << 0)

#define RESP					0x18

#define TIMING_2				0x1c
#define   TIMING_TADL(x)			((x) & 0xf)

#define CMD_REG1				0x20
#define CMD_REG2				0x24
#define ADDR_REG1				0x28
#define ADDR_REG2				0x2c

#define DMA_MST_CTRL				0x30
#define   DMA_MST_CTRL_GO			BIT(31)
#define   DMA_MST_CTRL_IN			(0 << 30)
#define   DMA_MST_CTRL_OUT			BIT(30)
#define   DMA_MST_CTRL_PERF_EN			BIT(29)
#define   DMA_MST_CTRL_IE_DONE			BIT(28)
#define   DMA_MST_CTRL_REUSE			BIT(27)
#define   DMA_MST_CTRL_BURST_1			(2 << 24)
#define   DMA_MST_CTRL_BURST_4			(3 << 24)
#define   DMA_MST_CTRL_BURST_8			(4 << 24)
#define   DMA_MST_CTRL_BURST_16			(5 << 24)
#define   DMA_MST_CTRL_IS_DONE			BIT(20)
#define   DMA_MST_CTRL_EN_A			BIT(2)
#define   DMA_MST_CTRL_EN_B			BIT(1)

#define DMA_CFG_A				0x34
#define DMA_CFG_B				0x38

#define FIFO_CTRL				0x3c
#define   FIFO_CTRL_CLR_ALL			BIT(3)

#define DATA_PTR				0x40
#define TAG_PTR					0x44
#define ECC_PTR					0x48
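
/*
 * The DMA engine transfers two buffers per page: buffer "A" (DMA_CFG_A/
 * DATA_PTR) carries the main data area and buffer "B" (DMA_CFG_B/TAG_PTR)
 * carries the tag/OOB area; tegra_nand_page_xfer() enables them
 * independently via DMA_MST_CTRL_EN_A/_EN_B.
 */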

#define DEC_STATUS				0x4c
#define   DEC_STATUS_A_ECC_FAIL			BIT(1)
#define   DEC_STATUS_ERR_COUNT_MASK		0x00ff0000
#define   DEC_STATUS_ERR_COUNT_SHIFT		16

#define HWSTATUS_CMD				0x50
#define HWSTATUS_MASK				0x54
#define   HWSTATUS_RDSTATUS_MASK(x)		(((x) & 0xff) << 24)
#define   HWSTATUS_RDSTATUS_VALUE(x)		(((x) & 0xff) << 16)
#define   HWSTATUS_RBSY_MASK(x)			(((x) & 0xff) << 8)
#define   HWSTATUS_RBSY_VALUE(x)		(((x) & 0xff) << 0)

#define BCH_CONFIG				0xcc
#define   BCH_ENABLE				BIT(0)
#define   BCH_TVAL_4				(0 << 4)
#define   BCH_TVAL_8				(1 << 4)
#define   BCH_TVAL_14				(2 << 4)
#define   BCH_TVAL_16				(3 << 4)

#define DEC_STAT_RESULT				0xd0
#define DEC_STAT_BUF				0xd4
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_MASK	0xff000000
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT	24
#define   DEC_STAT_BUF_CORR_SEC_FLAG_MASK	0x00ff0000
#define   DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT	16
#define   DEC_STAT_BUF_MAX_CORR_CNT_MASK	0x00001f00
#define   DEC_STAT_BUF_MAX_CORR_CNT_SHIFT	8

#define OFFSET(val, off)	((val) < (off) ? 0 : (val) - (off))
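
/*
 * Many timing fields encode "value minus offset" with a minimum of zero;
 * OFFSET() performs that clamped subtraction for the cycle-count
 * conversions in tegra_nand_setup_timing().
 */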

#define SKIP_SPARE_BYTES	4
#define BITS_PER_STEP_RS	18
#define BITS_PER_STEP_BCH	13

#define INT_MASK		(IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
#define HWSTATUS_CMD_DEFAULT	NAND_STATUS_READY
#define HWSTATUS_MASK_DEFAULT	(HWSTATUS_RDSTATUS_MASK(1) | \
				 HWSTATUS_RDSTATUS_VALUE(0) | \
				 HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
				 HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))
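
/*
 * Defaults for the hardware ready/busy status check used together with
 * COMMAND_RBSY_CHK: the RBSY mask/value pair waits for NAND_STATUS_READY.
 * (This reading is inferred from the field names above; the Tegra TRM is
 * authoritative on the exact HWSTATUS_CMD semantics.)
 */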

struct tegra_nand_controller {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct completion command_complete;
	struct completion dma_complete;
	bool last_read_error;
	int cur_cs;
	struct nand_chip *chip;
};

struct tegra_nand_chip {
	struct nand_chip chip;
	struct gpio_desc *wp_gpio;
	struct mtd_oob_region ecc;
	u32 config;
	u32 config_ecc;
	u32 bch_config;
	int cs[1];
};

static inline struct tegra_nand_controller *
			to_tegra_ctrl(struct nand_controller *hw_ctrl)
{
	return container_of(hw_ctrl, struct tegra_nand_controller,
			    controller);
}

static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
{
	return container_of(chip, struct tegra_nand_chip, chip);
}

static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	return -ERANGE;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
	.ecc = tegra_nand_ooblayout_rs_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
	.ecc = tegra_nand_ooblayout_bch_ecc,
	.free = tegra_nand_ooblayout_no_free,
};
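
/*
 * Both layouts reserve the first SKIP_SPARE_BYTES of the OOB area (the
 * spare bytes the controller skips, which typically hold the factory bad
 * block markers) and expose no free OOB bytes to users.
 */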

static irqreturn_t tegra_nand_irq(int irq, void *data)
{
	struct tegra_nand_controller *ctrl = data;
	u32 isr, dma;

	isr = readl_relaxed(ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	dev_dbg(ctrl->dev, "isr %08x\n", isr);

	if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
		return IRQ_NONE;

	/*
	 * The bit name is somewhat misleading: This is also set when
	 * HW ECC was successful. The data sheet states:
	 * Correctable OR Un-correctable errors occurred in the DMA transfer...
	 */
	if (isr & ISR_CORRFAIL_ERR)
		ctrl->last_read_error = true;

	if (isr & ISR_CMD_DONE)
		complete(&ctrl->command_complete);

	if (isr & ISR_UND)
		dev_err(ctrl->dev, "FIFO underrun\n");

	if (isr & ISR_OVR)
		dev_err(ctrl->dev, "FIFO overrun\n");

	/* handle DMA interrupts */
	if (dma & DMA_MST_CTRL_IS_DONE) {
		writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
		complete(&ctrl->dma_complete);
	}

	/* clear interrupts */
	writel_relaxed(isr, ctrl->regs + ISR);

	return IRQ_HANDLED;
}

static const char * const tegra_nand_reg_names[] = {
	"COMMAND",
	"STATUS",
	"ISR",
	"IER",
	"CONFIG",
	"TIMING",
	NULL,
	"TIMING2",
	"CMD_REG1",
	"CMD_REG2",
	"ADDR_REG1",
	"ADDR_REG2",
	"DMA_MST_CTRL",
	"DMA_CFG_A",
	"DMA_CFG_B",
	"FIFO_CTRL",
};

static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
{
	u32 reg;
	int i;

	dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
	for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
		const char *reg_name = tegra_nand_reg_names[i];

		if (!reg_name)
			continue;

		reg = readl_relaxed(ctrl->regs + (i * 4));
		dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
	}
}

static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
{
	u32 isr, dma;

	disable_irq(ctrl->irq);

	/* Abort current command/DMA operation */
	writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(0, ctrl->regs + COMMAND);

	/* clear interrupts */
	isr = readl_relaxed(ctrl->regs + ISR);
	writel_relaxed(isr, ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);

	reinit_completion(&ctrl->command_complete);
	reinit_completion(&ctrl->dma_complete);

	enable_irq(ctrl->irq);
}

static int tegra_nand_cmd(struct nand_chip *chip,
			  const struct nand_subop *subop)
{
	const struct nand_op_instr *instr;
	const struct nand_op_instr *instr_data_in = NULL;
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	unsigned int op_id, size = 0, offset = 0;
	bool first_cmd = true;
	u32 reg, cmd = 0;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int naddrs, i;
		const u8 *addrs;
		u32 addr1 = 0, addr2 = 0;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd) {
				cmd |= COMMAND_CLE;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG1);
			} else {
				cmd |= COMMAND_SEC_CMD;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG2);
			}
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr1 |= *addrs++ << (BITS_PER_BYTE * i);
			naddrs -= i;
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr2 |= *addrs++ << (BITS_PER_BYTE * i);

			writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
			writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
			break;

		case NAND_OP_DATA_IN_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
			       COMMAND_RX | COMMAND_A_VALID;

			instr_data_in = instr;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
			       COMMAND_TX | COMMAND_A_VALID;
			memcpy(&reg, instr->ctx.data.buf.out + offset, size);

			writel_relaxed(reg, ctrl->regs + RESP);
			break;

		case NAND_OP_WAITRDY_INSTR:
			cmd |= COMMAND_RBSY_CHK;
			break;
		}
	}

	cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
	writel_relaxed(cmd, ctrl->regs + COMMAND);
	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		return -ETIMEDOUT;
	}

	if (instr_data_in) {
		reg = readl_relaxed(ctrl->regs + RESP);
		memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
	}

	return 0;
}

static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
	);
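
/*
 * Three patterns cover what the COMMAND register can express in one go:
 * a command/address/command/wait sequence (e.g. erase), a small PIO
 * data-out, and a full read-style sequence ending in up to 4 bytes of PIO
 * data-in. Larger data phases take the DMA path in tegra_nand_page_xfer()
 * instead.
 */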

static void tegra_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);

	ctrl->cur_cs = nand->cs[die_nr];
}

static int tegra_nand_exec_op(struct nand_chip *chip,
			      const struct nand_operation *op,
			      bool check_only)
{
	if (!check_only)
		tegra_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
				      check_only);
}

static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
			      struct nand_chip *chip, bool enable)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);

	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
		writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
	else
		writel_relaxed(0, ctrl->regs + BCH_CONFIG);

	if (enable)
		writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
	else
		writel_relaxed(nand->config, ctrl->regs + CONFIG);
}
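
/*
 * The ECC-enabled CONFIG/BCH_CONFIG values are only kept in the registers
 * for the duration of a page transfer: the hwecc page ops below bracket
 * tegra_nand_page_xfer() with tegra_nand_hw_ecc(true)/(false), so the raw
 * accessors always run with the plain config.
 */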

static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
				void *buf, void *oob_buf, int oob_len, int page,
				bool read)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t dma_addr = 0, dma_addr_oob = 0;
	u32 addr1, cmd, dma_ctrl;
	int ret;

	tegra_nand_select_target(chip, chip->cur_cs);

	if (read) {
		writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
	} else {
		writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
	}
	cmd = COMMAND_CLE | COMMAND_SEC_CMD;

	/* Lower 16-bits are column, by default 0 */
	addr1 = page << 16;

	if (!buf)
		addr1 |= mtd->writesize;
	writel_relaxed(addr1, ctrl->regs + ADDR_REG1);

	if (chip->options & NAND_ROW_ADDR_3) {
		writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
	} else {
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
	}

	if (buf) {
		dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			return -EINVAL;
		}

		writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
		writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
	}

	if (oob_buf) {
		dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
					      dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			ret = -EINVAL;
			goto err_unmap_dma_page;
		}

		writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
		writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
	}

	dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
		   DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
		   DMA_MST_CTRL_BURST_16;

	if (buf)
		dma_ctrl |= DMA_MST_CTRL_EN_A;
	if (oob_buf)
		dma_ctrl |= DMA_MST_CTRL_EN_B;

	if (read)
		dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
	else
		dma_ctrl |= DMA_MST_CTRL_OUT;

	writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);

	cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
	       COMMAND_CE(ctrl->cur_cs);

	if (buf)
		cmd |= COMMAND_A_VALID;
	if (oob_buf)
		cmd |= COMMAND_B_VALID;

	if (read)
		cmd |= COMMAND_RX;
	else
		cmd |= COMMAND_TX | COMMAND_AFT_DAT;

	writel_relaxed(cmd, ctrl->regs + COMMAND);

	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}

	ret = wait_for_completion_timeout(&ctrl->dma_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "DMA timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}
	ret = 0;

err_unmap_dma:
	if (oob_buf)
		dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
err_unmap_dma_page:
	if (buf)
		dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);

	return ret;
}

static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
				     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				    mtd->oobsize, page, false);
}

static int tegra_nand_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, false);
}

static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	u32 dec_stat, max_corr_cnt;
	unsigned long fail_sec_flag;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
	tegra_nand_hw_ecc(ctrl, chip, false);
	if (ret)
		return ret;

	/* No correctable or un-correctable errors, page must have 0 bitflips */
	if (!ctrl->last_read_error)
		return 0;

	/*
	 * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
	 * which contains information for all ECC selections.
	 *
	 * Note that since we do not use Command Queues DEC_RESULT does not
	 * state the number of pages we can read from the DEC_STAT_BUF. But
	 * since CORRFAIL_ERR did occur during page read we do have a valid
	 * result in DEC_STAT_BUF.
	 */
	ctrl->last_read_error = false;
	dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);

	fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
			DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;

	max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
		       DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;

	if (fail_sec_flag) {
		int bit, max_bitflips = 0;

		/*
		 * Since we do not support subpage writes, a complete page
		 * is either written or not. We can take a shortcut here by
		 * checking whether any of the sectors has been read
		 * successfully: if at least one sector was read without
		 * error, the page must have been written previously. It
		 * cannot be an erased page then.
		 *
		 * E.g. the controller might return fail_sec_flag with 0x4,
		 * which would mean only the third sector failed to correct.
		 * The page must have been written and the third sector is
		 * really not correctable anymore.
		 */
		if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
			mtd->ecc_stats.failed += hweight8(fail_sec_flag);
			return max_corr_cnt;
		}

		/*
		 * All sectors failed to correct, but the ECC isn't smart
		 * enough to figure out if a page is really just erased.
		 * Read OOB data and check whether data/OOB is completely
		 * erased or if error correction just failed for all sub-
		 * pages.
		 */
		ret = tegra_nand_read_oob(chip, page);
		if (ret < 0)
			return ret;

		for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
			u8 *data = buf + (chip->ecc.size * bit);
			u8 *oob = chip->oob_poi + nand->ecc.offset +
				  (chip->ecc.bytes * bit);

			ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
							  oob, chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);
			if (ret < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += ret;
				max_bitflips = max(ret, max_bitflips);
			}
		}

		return max_t(unsigned int, max_corr_cnt, max_bitflips);
	} else {
		int corr_sec_flag;

		corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
				DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;

		/*
		 * The value returned in the register is the maximum of
		 * bitflips encountered in any of the ECC regions. As there is
		 * no way to get the number of bitflips in a specific region
		 * we are not able to deliver correct stats but instead
		 * overestimate the number of corrected bitflips by assuming
		 * that all regions where errors have been corrected
		 * encountered the maximum number of bitflips.
		 */
		mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);

		return max_corr_cnt;
	}
}

static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				   0, page, false);
	tegra_nand_hw_ecc(ctrl, chip, false);

	return ret;
}

static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
				    const struct nand_sdr_timings *timings)
{
	/*
	 * The period (and all other timings in this function) is in ps,
	 * so we need to take care here to avoid integer overflows.
	 */
	unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
	unsigned int period = DIV_ROUND_UP(1000000, rate);
	u32 val, reg = 0;

	val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
				timings->tRC_min), period);
	reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));

	val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
			       max(timings->tALS_min, timings->tALH_min)),
			   period);
	reg |= TIMING_TCS(OFFSET(val, 2));

	val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
			   period);
	reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));

	reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
	reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
	reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
	reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
	reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));

	writel_relaxed(reg, ctrl->regs + TIMING_1);

	val = DIV_ROUND_UP(timings->tADL_min, period);
	reg = TIMING_TADL(OFFSET(val, 3));

	writel_relaxed(reg, ctrl->regs + TIMING_2);
}
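
/*
 * Worked example (assuming a 100 MHz controller clock): rate = 100,
 * period = DIV_ROUND_UP(1000000, 100) = 10000 ps. A tRC_min of 25000 ps
 * gives val = 3 cycles and OFFSET(val, 3) = 0, i.e. the TCR_TAR_TRR field
 * encodes "3 cycles" as 0, presumably because the hardware adds the
 * offset back.
 */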

static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
				      const struct nand_interface_config *conf)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_sdr_timings *timings;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	tegra_nand_setup_timing(ctrl, timings);

	return 0;
}

static const int rs_strength_bootable[] = { 4 };
static const int rs_strength[] = { 4, 6, 8 };
static const int bch_strength_bootable[] = { 8, 16 };
static const int bch_strength[] = { 4, 8, 14, 16 };
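
/*
 * When the chip is the boot medium (NAND_IS_BOOT_MEDIUM) only the
 * "bootable" subsets are offered, presumably the strengths the boot ROM
 * can handle; otherwise every strength the controller supports is a
 * candidate.
 */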

static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
				   int strength_len, int bits_per_step,
				   int oobsize)
{
	struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(base);
	bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
	int i;

	/*
	 * Loop through available strengths. Backwards in case we try to
	 * maximize the BCH strength.
	 */
	for (i = 0; i < strength_len; i++) {
		int strength_sel, bytes_per_step, bytes_per_page;

		if (maximize) {
			strength_sel = strength[strength_len - i - 1];
		} else {
			strength_sel = strength[i];

			if (strength_sel < requirements->strength)
				continue;
		}

		bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
					      BITS_PER_BYTE);
		bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);

		/* Check whether strength fits OOB */
		if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
			return strength_sel;
	}

	return -EINVAL;
}

static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
{
	const int *strength;
	int strength_len, bits_per_step;

	switch (chip->ecc.algo) {
	case NAND_ECC_ALGO_RS:
		bits_per_step = BITS_PER_STEP_RS;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = rs_strength_bootable;
			strength_len = ARRAY_SIZE(rs_strength_bootable);
		} else {
			strength = rs_strength;
			strength_len = ARRAY_SIZE(rs_strength);
		}
		break;
	case NAND_ECC_ALGO_BCH:
		bits_per_step = BITS_PER_STEP_BCH;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = bch_strength_bootable;
			strength_len = ARRAY_SIZE(bch_strength_bootable);
		} else {
			strength = bch_strength;
			strength_len = ARRAY_SIZE(bch_strength);
		}
		break;
	default:
		return -EINVAL;
	}

	return tegra_nand_get_strength(chip, strength, strength_len,
				       bits_per_step, oobsize);
}
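
/*
 * Example fit check (hypothetical 4 KiB page with 224 byte OOB, BCH-16):
 * bytes_per_step = DIV_ROUND_UP(13 * 16, 8) = 26; with 8 steps of 512
 * bytes that is round_up(26 * 8, 4) = 208 bytes, which fits below
 * 224 - SKIP_SPARE_BYTES = 220, so 16-bit BCH would be selected when
 * maximizing strength.
 */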

static int tegra_nand_attach_chip(struct nand_chip *chip)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bits_per_step;
	int ret;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	chip->ecc.size = 512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	if (requirements->step_size != 512) {
		dev_err(ctrl->dev, "Unsupported step size %d\n",
			requirements->step_size);
		return -EINVAL;
	}

	chip->ecc.read_page = tegra_nand_read_page_hwecc;
	chip->ecc.write_page = tegra_nand_write_page_hwecc;
	chip->ecc.read_page_raw = tegra_nand_read_page_raw;
	chip->ecc.write_page_raw = tegra_nand_write_page_raw;
	chip->ecc.read_oob = tegra_nand_read_oob;
	chip->ecc.write_oob = tegra_nand_write_oob;

	if (chip->options & NAND_BUSWIDTH_16)
		nand->config |= CONFIG_BUS_WIDTH_16;

	if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
		if (mtd->writesize < 2048)
			chip->ecc.algo = NAND_ECC_ALGO_RS;
		else
			chip->ecc.algo = NAND_ECC_ALGO_BCH;
	}

	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
		dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
		return -EINVAL;
	}

	if (!chip->ecc.strength) {
		ret = tegra_nand_select_strength(chip, mtd->oobsize);
		if (ret < 0) {
			dev_err(ctrl->dev,
				"No valid strength found, minimum %d\n",
				requirements->strength);
			return ret;
		}

		chip->ecc.strength = ret;
	}

	nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
			   CONFIG_SKIP_SPARE_SIZE_4;

	switch (chip->ecc.algo) {
	case NAND_ECC_ALGO_RS:
		bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
		nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
				    CONFIG_ERR_COR;
		switch (chip->ecc.strength) {
		case 4:
			nand->config_ecc |= CONFIG_TVAL_4;
			break;
		case 6:
			nand->config_ecc |= CONFIG_TVAL_6;
			break;
		case 8:
			nand->config_ecc |= CONFIG_TVAL_8;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	case NAND_ECC_ALGO_BCH:
		bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
		nand->bch_config = BCH_ENABLE;
		switch (chip->ecc.strength) {
		case 4:
			nand->bch_config |= BCH_TVAL_4;
			break;
		case 8:
			nand->bch_config |= BCH_TVAL_8;
			break;
		case 14:
			nand->bch_config |= BCH_TVAL_14;
			break;
		case 16:
			nand->bch_config |= BCH_TVAL_16;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	default:
		dev_err(ctrl->dev, "ECC algorithm not supported\n");
		return -EINVAL;
	}

	dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
		 chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
		 chip->ecc.strength);

	chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);

	switch (mtd->writesize) {
	case 256:
		nand->config |= CONFIG_PS_256;
		break;
	case 512:
		nand->config |= CONFIG_PS_512;
		break;
	case 1024:
		nand->config |= CONFIG_PS_1024;
		break;
	case 2048:
		nand->config |= CONFIG_PS_2048;
		break;
	case 4096:
		nand->config |= CONFIG_PS_4096;
		break;
	default:
		dev_err(ctrl->dev, "Unsupported writesize %d\n",
			mtd->writesize);
		return -ENODEV;
	}

	/* Store complete configuration for HW ECC in config_ecc */
	nand->config_ecc |= nand->config;

	/* Non-HW ECC read/writes complete OOB */
	nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
	writel_relaxed(nand->config, ctrl->regs + CONFIG);

	return 0;
}

static const struct nand_controller_ops tegra_nand_controller_ops = {
	.attach_chip = &tegra_nand_attach_chip,
	.exec_op = tegra_nand_exec_op,
	.setup_interface = tegra_nand_setup_interface,
};

static int tegra_nand_chips_init(struct device *dev,
				 struct tegra_nand_controller *ctrl)
{
	struct device_node *np = dev->of_node;
	struct device_node *np_nand;
	int nsels, nchips = of_get_child_count(np);
	struct tegra_nand_chip *nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u32 cs;

	if (nchips != 1) {
		dev_err(dev, "Currently only one NAND chip supported\n");
		return -EINVAL;
	}

	np_nand = of_get_next_child(np, NULL);

	nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
	if (nsels != 1) {
		dev_err(dev, "Missing/invalid reg property\n");
		return -EINVAL;
	}

	/* Retrieve CS id, currently only single die NAND supported */
	ret = of_property_read_u32(np_nand, "reg", &cs);
	if (ret) {
		dev_err(dev, "could not retrieve reg property: %d\n", ret);
		return -EINVAL;
	}

	nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->cs[0] = cs;

	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);

	if (IS_ERR(nand->wp_gpio)) {
		ret = PTR_ERR(nand->wp_gpio);
		dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
		return ret;
	}

	chip = &nand->chip;
	chip->controller = &ctrl->controller;

	mtd = nand_to_mtd(chip);

	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	nand_set_flash_node(chip, np_nand);

	if (!mtd->name)
		mtd->name = "tegra_nand";

	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	mtd_ooblayout_ecc(mtd, 0, &nand->ecc);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "Failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	ctrl->chip = chip;

	return 0;
}

static int tegra_nand_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	struct tegra_nand_controller *ctrl;
	struct resource *res;
	int err = 0;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->dev = &pdev->dev;
	nand_controller_init(&ctrl->controller);
	ctrl->controller.ops = &tegra_nand_controller_ops;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctrl->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ctrl->regs))
		return PTR_ERR(ctrl->regs);

	rst = devm_reset_control_get(&pdev->dev, "nand");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ctrl->clk = devm_clk_get(&pdev->dev, "nand");
	if (IS_ERR(ctrl->clk))
		return PTR_ERR(ctrl->clk);

	err = clk_prepare_enable(ctrl->clk);
	if (err)
		return err;

	err = reset_control_reset(rst);
	if (err) {
		dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
		goto err_disable_clk;
	}

	writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
	writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
	writel_relaxed(INT_MASK, ctrl->regs + IER);

	init_completion(&ctrl->command_complete);
	init_completion(&ctrl->dma_complete);

	ctrl->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
			       dev_name(&pdev->dev), ctrl);
	if (err) {
		dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
		goto err_disable_clk;
	}

	writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);

	err = tegra_nand_chips_init(ctrl->dev, ctrl);
	if (err)
		goto err_disable_clk;

	platform_set_drvdata(pdev, ctrl);

	return 0;

err_disable_clk:
	clk_disable_unprepare(ctrl->clk);
	return err;
}

static int tegra_nand_remove(struct platform_device *pdev)
{
	struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
	struct nand_chip *chip = ctrl->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	nand_cleanup(chip);

	clk_disable_unprepare(ctrl->clk);

	return 0;
}

static const struct of_device_id tegra_nand_of_match[] = {
	{ .compatible = "nvidia,tegra20-nand" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_nand_of_match);

static struct platform_driver tegra_nand_driver = {
	.driver = {
		.name = "tegra-nand",
		.of_match_table = tegra_nand_of_match,
	},
	.probe = tegra_nand_probe,
	.remove = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);

MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
MODULE_LICENSE("GPL v2");