1 // SPDX-License-Identifier: GPL-2.0
/*
 * Support for Macronix external hardware ECC engine for NAND devices, also
 * called DPE for Data Processing Engine.
 *
 * Copyright © 2019 Macronix
 * Author: Miquel Raynal <miquel.raynal@bootlin.com>
 */
10 #include <linux/dma-mapping.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
14 #include <linux/iopoll.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/nand.h>
19 #include <linux/mtd/nand-ecc-mxic.h>
20 #include <linux/mutex.h>
22 #include <linux/of_platform.h>
23 #include <linux/platform_device.h>
24 #include <linux/slab.h>
/* DPE Configuration */
#define DP_CONFIG 0x00
#define ECC_EN BIT(0)
#define ECC_TYP(idx) (((idx) << 3) & GENMASK(6, 3))
/* DPE Interrupt Status */
#define INTRPT_STS 0x04
#define TRANS_CMPLT BIT(0)
#define SDMA_MAIN BIT(1)
#define SDMA_SPARE BIT(2)
#define ECC_ERR BIT(3)
#define TO_SPARE BIT(4)
#define TO_MAIN BIT(5)
/* DPE Interrupt Status Enable */
#define INTRPT_STS_EN 0x08
/* DPE Interrupt Signal Enable */
#define INTRPT_SIG_EN 0x0C
/* Host Controller Configuration */
#define HC_CONFIG 0x10
#define DEV2MEM 0 /* TRANS_TYP_DMA in the spec */
#define MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */
#define MAPPING BIT(5) /* TRANS_TYP_MAPPING in the spec */
#define ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */
#define ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
#define BURST_TYP_FIXED 0
#define BURST_TYP_INCREASING BIT(0)
/* Host Controller Slave Address */
#define HC_SLV_ADDR 0x14
/* ECC Chunk Size */
#define CHUNK_SIZE 0x20
/* Main Data Size */
#define MAIN_SIZE 0x24
/* Spare Data Size */
#define SPARE_SIZE 0x28
#define META_SZ(reg) ((reg) & GENMASK(7, 0))
#define PARITY_SZ(reg) (((reg) & GENMASK(15, 8)) >> 8)
#define RSV_SZ(reg) (((reg) & GENMASK(23, 16)) >> 16)
#define SPARE_SZ(reg) ((reg) >> 24)
/* ECC Chunk Count */
#define CHUNK_CNT 0x30
/* SDMA Control */
#define SDMA_CTRL 0x40
#define WRITE_NAND 0
#define READ_NAND BIT(1)
#define CONT_NAND BIT(29)
#define CONT_SYSM BIT(30) /* Continue System Memory? */
#define SDMA_STRT BIT(31)
/* SDMA Address of Main Data */
#define SDMA_MAIN_ADDR 0x44
/* SDMA Address of Spare Data */
#define SDMA_SPARE_ADDR 0x48
/* DPE Version Number */
#define DP_VER 0x7C
#define DP_VER_OFFSET 16

/* Status bytes between each chunk of spare data */
#define STAT_BYTES 4
#define NO_ERR 0x00
#define MAX_CORR_ERR 0x28
#define UNCORR_ERR 0xFE
#define ERASED_CHUNK 0xFF
87 struct mxic_ecc_engine
{
91 struct completion complete
;
92 struct nand_ecc_engine external_engine
;
93 struct nand_ecc_engine pipelined_engine
;
99 unsigned int data_step_sz
;
100 unsigned int oob_step_sz
;
101 unsigned int parity_sz
;
102 unsigned int meta_sz
;
106 /* DMA boilerplate */
107 struct nand_ecc_req_tweak_ctx req_ctx
;
109 struct scatterlist sg
[2];
110 struct nand_page_io_req
*req
;
111 unsigned int pageoffs
;
114 static struct mxic_ecc_engine
*ext_ecc_eng_to_mxic(struct nand_ecc_engine
*eng
)
116 return container_of(eng
, struct mxic_ecc_engine
, external_engine
);
119 static struct mxic_ecc_engine
*pip_ecc_eng_to_mxic(struct nand_ecc_engine
*eng
)
121 return container_of(eng
, struct mxic_ecc_engine
, pipelined_engine
);
124 static struct mxic_ecc_engine
*nand_to_mxic(struct nand_device
*nand
)
126 struct nand_ecc_engine
*eng
= nand
->ecc
.engine
;
128 if (eng
->integration
== NAND_ECC_ENGINE_INTEGRATION_EXTERNAL
)
129 return ext_ecc_eng_to_mxic(eng
);
131 return pip_ecc_eng_to_mxic(eng
);
134 static int mxic_ecc_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
135 struct mtd_oob_region
*oobregion
)
137 struct nand_device
*nand
= mtd_to_nanddev(mtd
);
138 struct mxic_ecc_ctx
*ctx
= nand_to_ecc_ctx(nand
);
140 if (section
< 0 || section
>= ctx
->steps
)
143 oobregion
->offset
= (section
* ctx
->oob_step_sz
) + ctx
->meta_sz
;
144 oobregion
->length
= ctx
->parity_sz
;
149 static int mxic_ecc_ooblayout_free(struct mtd_info
*mtd
, int section
,
150 struct mtd_oob_region
*oobregion
)
152 struct nand_device
*nand
= mtd_to_nanddev(mtd
);
153 struct mxic_ecc_ctx
*ctx
= nand_to_ecc_ctx(nand
);
155 if (section
< 0 || section
>= ctx
->steps
)
159 oobregion
->offset
= 2;
160 oobregion
->length
= ctx
->meta_sz
- 2;
162 oobregion
->offset
= section
* ctx
->oob_step_sz
;
163 oobregion
->length
= ctx
->meta_sz
;
169 static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops
= {
170 .ecc
= mxic_ecc_ooblayout_ecc
,
171 .free
= mxic_ecc_ooblayout_free
,
174 static void mxic_ecc_disable_engine(struct mxic_ecc_engine
*mxic
)
178 reg
= readl(mxic
->regs
+ DP_CONFIG
);
180 writel(reg
, mxic
->regs
+ DP_CONFIG
);
183 static void mxic_ecc_enable_engine(struct mxic_ecc_engine
*mxic
)
187 reg
= readl(mxic
->regs
+ DP_CONFIG
);
189 writel(reg
, mxic
->regs
+ DP_CONFIG
);
192 static void mxic_ecc_disable_int(struct mxic_ecc_engine
*mxic
)
194 writel(0, mxic
->regs
+ INTRPT_SIG_EN
);
197 static void mxic_ecc_enable_int(struct mxic_ecc_engine
*mxic
)
199 writel(TRANS_CMPLT
, mxic
->regs
+ INTRPT_SIG_EN
);
202 static irqreturn_t
mxic_ecc_isr(int irq
, void *dev_id
)
204 struct mxic_ecc_engine
*mxic
= dev_id
;
207 sts
= readl(mxic
->regs
+ INTRPT_STS
);
211 if (sts
& TRANS_CMPLT
)
212 complete(&mxic
->complete
);
214 writel(sts
, mxic
->regs
+ INTRPT_STS
);
219 static int mxic_ecc_init_ctx(struct nand_device
*nand
, struct device
*dev
)
221 struct mxic_ecc_engine
*mxic
= nand_to_mxic(nand
);
222 struct nand_ecc_props
*conf
= &nand
->ecc
.ctx
.conf
;
223 struct nand_ecc_props
*reqs
= &nand
->ecc
.requirements
;
224 struct nand_ecc_props
*user
= &nand
->ecc
.user_conf
;
225 struct mtd_info
*mtd
= nanddev_to_mtd(nand
);
226 int step_size
= 0, strength
= 0, desired_correction
= 0, steps
, idx
;
227 static const int possible_strength
[] = {4, 8, 40, 48};
228 static const int spare_size
[] = {32, 32, 96, 96};
229 struct mxic_ecc_ctx
*ctx
;
233 ctx
= devm_kzalloc(dev
, sizeof(*ctx
), GFP_KERNEL
);
237 nand
->ecc
.ctx
.priv
= ctx
;
239 /* Only large page NAND chips may use BCH */
240 if (mtd
->oobsize
< 64) {
241 pr_err("BCH cannot be used with small page NAND chips\n");
245 mtd_set_ooblayout(mtd
, &mxic_ecc_ooblayout_ops
);
247 /* Enable all status bits */
248 writel(TRANS_CMPLT
| SDMA_MAIN
| SDMA_SPARE
| ECC_ERR
|
249 TO_SPARE
| TO_MAIN
, mxic
->regs
+ INTRPT_STS_EN
);
251 /* Configure the correction depending on the NAND device topology */
252 if (user
->step_size
&& user
->strength
) {
253 step_size
= user
->step_size
;
254 strength
= user
->strength
;
255 } else if (reqs
->step_size
&& reqs
->strength
) {
256 step_size
= reqs
->step_size
;
257 strength
= reqs
->strength
;
260 if (step_size
&& strength
) {
261 steps
= mtd
->writesize
/ step_size
;
262 desired_correction
= steps
* strength
;
265 /* Step size is fixed to 1kiB, strength may vary (4 possible values) */
266 conf
->step_size
= SZ_1K
;
267 steps
= mtd
->writesize
/ conf
->step_size
;
269 ctx
->status
= devm_kzalloc(dev
, steps
* sizeof(u8
), GFP_KERNEL
);
273 if (desired_correction
) {
274 strength
= desired_correction
/ steps
;
276 for (idx
= 0; idx
< ARRAY_SIZE(possible_strength
); idx
++)
277 if (possible_strength
[idx
] >= strength
)
280 idx
= min_t(unsigned int, idx
,
281 ARRAY_SIZE(possible_strength
) - 1);
283 /* Missing data, maximize the correction */
284 idx
= ARRAY_SIZE(possible_strength
) - 1;
287 /* Tune the selected strength until it fits in the OOB area */
288 for (; idx
>= 0; idx
--) {
289 if (spare_size
[idx
] * steps
<= mtd
->oobsize
)
293 /* This engine cannot be used with this NAND device */
297 /* Configure the engine for the desired strength */
298 writel(ECC_TYP(idx
), mxic
->regs
+ DP_CONFIG
);
299 conf
->strength
= possible_strength
[idx
];
300 spare_reg
= readl(mxic
->regs
+ SPARE_SIZE
);
303 ctx
->data_step_sz
= mtd
->writesize
/ steps
;
304 ctx
->oob_step_sz
= mtd
->oobsize
/ steps
;
305 ctx
->parity_sz
= PARITY_SZ(spare_reg
);
306 ctx
->meta_sz
= META_SZ(spare_reg
);
308 /* Ensure buffers will contain enough bytes to store the STAT_BYTES */
309 ctx
->req_ctx
.oob_buffer_size
= nanddev_per_page_oobsize(nand
) +
310 (ctx
->steps
* STAT_BYTES
);
311 ret
= nand_ecc_init_req_tweaking(&ctx
->req_ctx
, nand
);
315 ctx
->oobwithstat
= kmalloc(mtd
->oobsize
+ (ctx
->steps
* STAT_BYTES
),
317 if (!ctx
->oobwithstat
) {
319 goto cleanup_req_tweak
;
322 sg_init_table(ctx
->sg
, 2);
324 /* Configuration dump and sanity checks */
325 dev_err(dev
, "DPE version number: %d\n",
326 readl(mxic
->regs
+ DP_VER
) >> DP_VER_OFFSET
);
327 dev_err(dev
, "Chunk size: %d\n", readl(mxic
->regs
+ CHUNK_SIZE
));
328 dev_err(dev
, "Main size: %d\n", readl(mxic
->regs
+ MAIN_SIZE
));
329 dev_err(dev
, "Spare size: %d\n", SPARE_SZ(spare_reg
));
330 dev_err(dev
, "Rsv size: %ld\n", RSV_SZ(spare_reg
));
331 dev_err(dev
, "Parity size: %d\n", ctx
->parity_sz
);
332 dev_err(dev
, "Meta size: %d\n", ctx
->meta_sz
);
334 if ((ctx
->meta_sz
+ ctx
->parity_sz
+ RSV_SZ(spare_reg
)) !=
335 SPARE_SZ(spare_reg
)) {
336 dev_err(dev
, "Wrong OOB configuration: %d + %d + %ld != %d\n",
337 ctx
->meta_sz
, ctx
->parity_sz
, RSV_SZ(spare_reg
),
338 SPARE_SZ(spare_reg
));
340 goto free_oobwithstat
;
343 if (ctx
->oob_step_sz
!= SPARE_SZ(spare_reg
)) {
344 dev_err(dev
, "Wrong OOB configuration: %d != %d\n",
345 ctx
->oob_step_sz
, SPARE_SZ(spare_reg
));
347 goto free_oobwithstat
;
353 kfree(ctx
->oobwithstat
);
355 nand_ecc_cleanup_req_tweaking(&ctx
->req_ctx
);
360 static int mxic_ecc_init_ctx_external(struct nand_device
*nand
)
362 struct mxic_ecc_engine
*mxic
= nand_to_mxic(nand
);
363 struct device
*dev
= nand
->ecc
.engine
->dev
;
366 dev_info(dev
, "Macronix ECC engine in external mode\n");
368 ret
= mxic_ecc_init_ctx(nand
, dev
);
372 /* Trigger each step manually */
373 writel(1, mxic
->regs
+ CHUNK_CNT
);
374 writel(BURST_TYP_INCREASING
| ECC_PACKED
| MEM2MEM
,
375 mxic
->regs
+ HC_CONFIG
);
380 static int mxic_ecc_init_ctx_pipelined(struct nand_device
*nand
)
382 struct mxic_ecc_engine
*mxic
= nand_to_mxic(nand
);
383 struct mxic_ecc_ctx
*ctx
;
387 dev
= nand_ecc_get_engine_dev(nand
->ecc
.engine
->dev
);
391 dev_info(dev
, "Macronix ECC engine in pipelined/mapping mode\n");
393 ret
= mxic_ecc_init_ctx(nand
, dev
);
397 ctx
= nand_to_ecc_ctx(nand
);
399 /* All steps should be handled in one go directly by the internal DMA */
400 writel(ctx
->steps
, mxic
->regs
+ CHUNK_CNT
);
403 * Interleaved ECC scheme cannot be used otherwise factory bad block
404 * markers would be lost. A packed layout is mandatory.
406 writel(BURST_TYP_INCREASING
| ECC_PACKED
| MAPPING
,
407 mxic
->regs
+ HC_CONFIG
);
412 static void mxic_ecc_cleanup_ctx(struct nand_device
*nand
)
414 struct mxic_ecc_ctx
*ctx
= nand_to_ecc_ctx(nand
);
417 nand_ecc_cleanup_req_tweaking(&ctx
->req_ctx
);
418 kfree(ctx
->oobwithstat
);
422 static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine
*mxic
)
428 reinit_completion(&mxic
->complete
);
429 mxic_ecc_enable_int(mxic
);
430 ret
= wait_for_completion_timeout(&mxic
->complete
,
431 msecs_to_jiffies(1000));
432 ret
= ret
? 0 : -ETIMEDOUT
;
433 mxic_ecc_disable_int(mxic
);
435 ret
= readl_poll_timeout(mxic
->regs
+ INTRPT_STS
, val
,
436 val
& TRANS_CMPLT
, 10, USEC_PER_SEC
);
437 writel(val
, mxic
->regs
+ INTRPT_STS
);
441 dev_err(mxic
->dev
, "Timeout on data xfer completion\n");
448 static int mxic_ecc_process_data(struct mxic_ecc_engine
*mxic
,
449 unsigned int direction
)
451 unsigned int dir
= (direction
== NAND_PAGE_READ
) ?
452 READ_NAND
: WRITE_NAND
;
455 mxic_ecc_enable_engine(mxic
);
457 /* Trigger processing */
458 writel(SDMA_STRT
| dir
, mxic
->regs
+ SDMA_CTRL
);
460 /* Wait for completion */
461 ret
= mxic_ecc_data_xfer_wait_for_completion(mxic
);
463 mxic_ecc_disable_engine(mxic
);
468 int mxic_ecc_process_data_pipelined(struct nand_ecc_engine
*eng
,
469 unsigned int direction
, dma_addr_t dirmap
)
471 struct mxic_ecc_engine
*mxic
= pip_ecc_eng_to_mxic(eng
);
474 writel(dirmap
, mxic
->regs
+ HC_SLV_ADDR
);
476 return mxic_ecc_process_data(mxic
, direction
);
478 EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined
);
480 static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx
*ctx
)
482 u8
*buf
= ctx
->oobwithstat
;
486 /* Extract the ECC status */
487 for (step
= 0; step
< ctx
->steps
; step
++) {
488 next_stat_pos
= ctx
->oob_step_sz
+
489 ((STAT_BYTES
+ ctx
->oob_step_sz
) * step
);
491 ctx
->status
[step
] = buf
[next_stat_pos
];
495 static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx
*ctx
,
496 u8
*dst
, const u8
*src
)
500 /* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
501 for (step
= 0; step
< ctx
->steps
; step
++)
502 memcpy(dst
+ (step
* ctx
->oob_step_sz
),
503 src
+ (step
* (ctx
->oob_step_sz
+ STAT_BYTES
)),
507 static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx
*ctx
,
508 u8
*dst
, const u8
*src
)
512 /* Add some space in the OOB buffer for the status bytes */
513 for (step
= 0; step
< ctx
->steps
; step
++)
514 memcpy(dst
+ (step
* (ctx
->oob_step_sz
+ STAT_BYTES
)),
515 src
+ (step
* ctx
->oob_step_sz
),
519 static int mxic_ecc_count_biterrs(struct mxic_ecc_engine
*mxic
,
520 struct nand_device
*nand
)
522 struct mxic_ecc_ctx
*ctx
= nand_to_ecc_ctx(nand
);
523 struct mtd_info
*mtd
= nanddev_to_mtd(nand
);
524 struct device
*dev
= mxic
->dev
;
525 unsigned int max_bf
= 0;
526 bool failure
= false;
529 for (step
= 0; step
< ctx
->steps
; step
++) {
530 u8 stat
= ctx
->status
[step
];
532 if (stat
== NO_ERR
) {
533 dev_dbg(dev
, "ECC step %d: no error\n", step
);
534 } else if (stat
== ERASED_CHUNK
) {
535 dev_dbg(dev
, "ECC step %d: erased\n", step
);
536 } else if (stat
== UNCORR_ERR
|| stat
> MAX_CORR_ERR
) {
537 dev_dbg(dev
, "ECC step %d: uncorrectable\n", step
);
538 mtd
->ecc_stats
.failed
++;
541 dev_dbg(dev
, "ECC step %d: %d bits corrected\n",
543 max_bf
= max_t(unsigned int, max_bf
, stat
);
544 mtd
->ecc_stats
.corrected
+= stat
;
548 return failure
? -EBADMSG
: max_bf
;
551 /* External ECC engine helpers */
552 static int mxic_ecc_prepare_io_req_external(struct nand_device
*nand
,
553 struct nand_page_io_req
*req
)
555 struct mxic_ecc_engine
*mxic
= nand_to_mxic(nand
);
556 struct mxic_ecc_ctx
*ctx
= nand_to_ecc_ctx(nand
);
557 struct mtd_info
*mtd
= nanddev_to_mtd(nand
);
558 int offset
, nents
, step
, ret
;
560 if (req
->mode
== MTD_OPS_RAW
)
563 nand_ecc_tweak_req(&ctx
->req_ctx
, req
);
566 if (req
->type
== NAND_PAGE_READ
)
569 mxic_ecc_add_room_in_oobbuf(ctx
, ctx
->oobwithstat
,
570 ctx
->req
->oobbuf
.out
);
572 sg_set_buf(&ctx
->sg
[0], req
->databuf
.out
, req
->datalen
);
573 sg_set_buf(&ctx
->sg
[1], ctx
->oobwithstat
,
574 req
->ooblen
+ (ctx
->steps
* STAT_BYTES
));
576 nents
= dma_map_sg(mxic
->dev
, ctx
->sg
, 2, DMA_BIDIRECTIONAL
);
580 mutex_lock(&mxic
->lock
);
582 for (step
= 0; step
< ctx
->steps
; step
++) {
583 writel(sg_dma_address(&ctx
->sg
[0]) + (step
* ctx
->data_step_sz
),
584 mxic
->regs
+ SDMA_MAIN_ADDR
);
585 writel(sg_dma_address(&ctx
->sg
[1]) + (step
* (ctx
->oob_step_sz
+ STAT_BYTES
)),
586 mxic
->regs
+ SDMA_SPARE_ADDR
);
587 ret
= mxic_ecc_process_data(mxic
, ctx
->req
->type
);
592 mutex_unlock(&mxic
->lock
);
594 dma_unmap_sg(mxic
->dev
, ctx
->sg
, 2, DMA_BIDIRECTIONAL
);
599 /* Retrieve the calculated ECC bytes */
600 for (step
= 0; step
< ctx
->steps
; step
++) {
601 offset
= ctx
->meta_sz
+ (step
* ctx
->oob_step_sz
);
602 mtd_ooblayout_get_eccbytes(mtd
,
603 (u8
*)ctx
->req
->oobbuf
.out
+ offset
,
604 ctx
->oobwithstat
+ (step
* STAT_BYTES
),
605 step
* ctx
->parity_sz
,
612 static int mxic_ecc_finish_io_req_external(struct nand_device
*nand
,
613 struct nand_page_io_req
*req
)
615 struct mxic_ecc_engine
*mxic
= nand_to_mxic(nand
);
616 struct mxic_ecc_ctx
*ctx
= nand_to_ecc_ctx(nand
);
617 int nents
, step
, ret
;
619 if (req
->mode
== MTD_OPS_RAW
)
622 if (req
->type
== NAND_PAGE_WRITE
) {
623 nand_ecc_restore_req(&ctx
->req_ctx
, req
);
627 /* Copy the OOB buffer and add room for the ECC engine status bytes */
628 mxic_ecc_add_room_in_oobbuf(ctx
, ctx
->oobwithstat
, ctx
->req
->oobbuf
.in
);
630 sg_set_buf(&ctx
->sg
[0], req
->databuf
.in
, req
->datalen
);
631 sg_set_buf(&ctx
->sg
[1], ctx
->oobwithstat
,
632 req
->ooblen
+ (ctx
->steps
* STAT_BYTES
));
633 nents
= dma_map_sg(mxic
->dev
, ctx
->sg
, 2, DMA_BIDIRECTIONAL
);
637 mutex_lock(&mxic
->lock
);
639 for (step
= 0; step
< ctx
->steps
; step
++) {
640 writel(sg_dma_address(&ctx
->sg
[0]) + (step
* ctx
->data_step_sz
),
641 mxic
->regs
+ SDMA_MAIN_ADDR
);
642 writel(sg_dma_address(&ctx
->sg
[1]) + (step
* (ctx
->oob_step_sz
+ STAT_BYTES
)),
643 mxic
->regs
+ SDMA_SPARE_ADDR
);
644 ret
= mxic_ecc_process_data(mxic
, ctx
->req
->type
);
649 mutex_unlock(&mxic
->lock
);
651 dma_unmap_sg(mxic
->dev
, ctx
->sg
, 2, DMA_BIDIRECTIONAL
);
654 nand_ecc_restore_req(&ctx
->req_ctx
, req
);
658 /* Extract the status bytes and reconstruct the buffer */
659 mxic_ecc_extract_status_bytes(ctx
);
660 mxic_ecc_reconstruct_oobbuf(ctx
, ctx
->req
->oobbuf
.in
, ctx
->oobwithstat
);
662 nand_ecc_restore_req(&ctx
->req_ctx
, req
);
664 return mxic_ecc_count_biterrs(mxic
, nand
);
667 /* Pipelined ECC engine helpers */
668 static int mxic_ecc_prepare_io_req_pipelined(struct nand_device
*nand
,
669 struct nand_page_io_req
*req
)
671 struct mxic_ecc_engine
*mxic
= nand_to_mxic(nand
);
672 struct mxic_ecc_ctx
*ctx
= nand_to_ecc_ctx(nand
);
675 if (req
->mode
== MTD_OPS_RAW
)
678 nand_ecc_tweak_req(&ctx
->req_ctx
, req
);
681 /* Copy the OOB buffer and add room for the ECC engine status bytes */
682 mxic_ecc_add_room_in_oobbuf(ctx
, ctx
->oobwithstat
, ctx
->req
->oobbuf
.in
);
684 sg_set_buf(&ctx
->sg
[0], req
->databuf
.in
, req
->datalen
);
685 sg_set_buf(&ctx
->sg
[1], ctx
->oobwithstat
,
686 req
->ooblen
+ (ctx
->steps
* STAT_BYTES
));
688 nents
= dma_map_sg(mxic
->dev
, ctx
->sg
, 2, DMA_BIDIRECTIONAL
);
692 mutex_lock(&mxic
->lock
);
694 writel(sg_dma_address(&ctx
->sg
[0]), mxic
->regs
+ SDMA_MAIN_ADDR
);
695 writel(sg_dma_address(&ctx
->sg
[1]), mxic
->regs
+ SDMA_SPARE_ADDR
);
700 static int mxic_ecc_finish_io_req_pipelined(struct nand_device
*nand
,
701 struct nand_page_io_req
*req
)
703 struct mxic_ecc_engine
*mxic
= nand_to_mxic(nand
);
704 struct mxic_ecc_ctx
*ctx
= nand_to_ecc_ctx(nand
);
707 if (req
->mode
== MTD_OPS_RAW
)
710 mutex_unlock(&mxic
->lock
);
712 dma_unmap_sg(mxic
->dev
, ctx
->sg
, 2, DMA_BIDIRECTIONAL
);
714 if (req
->type
== NAND_PAGE_READ
) {
715 mxic_ecc_extract_status_bytes(ctx
);
716 mxic_ecc_reconstruct_oobbuf(ctx
, ctx
->req
->oobbuf
.in
,
718 ret
= mxic_ecc_count_biterrs(mxic
, nand
);
721 nand_ecc_restore_req(&ctx
->req_ctx
, req
);
726 static const struct nand_ecc_engine_ops mxic_ecc_engine_external_ops
= {
727 .init_ctx
= mxic_ecc_init_ctx_external
,
728 .cleanup_ctx
= mxic_ecc_cleanup_ctx
,
729 .prepare_io_req
= mxic_ecc_prepare_io_req_external
,
730 .finish_io_req
= mxic_ecc_finish_io_req_external
,
733 static const struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops
= {
734 .init_ctx
= mxic_ecc_init_ctx_pipelined
,
735 .cleanup_ctx
= mxic_ecc_cleanup_ctx
,
736 .prepare_io_req
= mxic_ecc_prepare_io_req_pipelined
,
737 .finish_io_req
= mxic_ecc_finish_io_req_pipelined
,
740 const struct nand_ecc_engine_ops
*mxic_ecc_get_pipelined_ops(void)
742 return &mxic_ecc_engine_pipelined_ops
;
744 EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops
);
746 static struct platform_device
*
747 mxic_ecc_get_pdev(struct platform_device
*spi_pdev
)
749 struct platform_device
*eng_pdev
;
750 struct device_node
*np
;
752 /* Retrieve the nand-ecc-engine phandle */
753 np
= of_parse_phandle(spi_pdev
->dev
.of_node
, "nand-ecc-engine", 0);
757 /* Jump to the engine's device node */
758 eng_pdev
= of_find_device_by_node(np
);
764 void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine
*eng
)
766 struct mxic_ecc_engine
*mxic
= pip_ecc_eng_to_mxic(eng
);
768 platform_device_put(to_platform_device(mxic
->dev
));
770 EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine
);
772 struct nand_ecc_engine
*
773 mxic_ecc_get_pipelined_engine(struct platform_device
*spi_pdev
)
775 struct platform_device
*eng_pdev
;
776 struct mxic_ecc_engine
*mxic
;
778 eng_pdev
= mxic_ecc_get_pdev(spi_pdev
);
780 return ERR_PTR(-ENODEV
);
782 mxic
= platform_get_drvdata(eng_pdev
);
784 platform_device_put(eng_pdev
);
785 return ERR_PTR(-EPROBE_DEFER
);
788 return &mxic
->pipelined_engine
;
790 EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine
);
/*
 * Only the external ECC engine is exported as the pipelined is SoC specific, so
 * it is registered directly by the drivers that wrap it.
 */
796 static int mxic_ecc_probe(struct platform_device
*pdev
)
798 struct device
*dev
= &pdev
->dev
;
799 struct mxic_ecc_engine
*mxic
;
802 mxic
= devm_kzalloc(&pdev
->dev
, sizeof(*mxic
), GFP_KERNEL
);
806 mxic
->dev
= &pdev
->dev
;
809 * Both memory regions for the ECC engine itself and the AXI slave
810 * address are mandatory.
812 mxic
->regs
= devm_platform_ioremap_resource(pdev
, 0);
813 if (IS_ERR(mxic
->regs
)) {
814 dev_err(&pdev
->dev
, "Missing memory region\n");
815 return PTR_ERR(mxic
->regs
);
818 mxic_ecc_disable_engine(mxic
);
819 mxic_ecc_disable_int(mxic
);
821 /* IRQ is optional yet much more efficient */
822 mxic
->irq
= platform_get_irq_byname_optional(pdev
, "ecc-engine");
824 ret
= devm_request_irq(&pdev
->dev
, mxic
->irq
, mxic_ecc_isr
, 0,
829 dev_info(dev
, "Invalid or missing IRQ, fallback to polling\n");
833 mutex_init(&mxic
->lock
);
836 * In external mode, the device is the ECC engine. In pipelined mode,
837 * the device is the host controller. The device is used to match the
838 * right ECC engine based on the DT properties.
840 mxic
->external_engine
.dev
= &pdev
->dev
;
841 mxic
->external_engine
.integration
= NAND_ECC_ENGINE_INTEGRATION_EXTERNAL
;
842 mxic
->external_engine
.ops
= &mxic_ecc_engine_external_ops
;
844 nand_ecc_register_on_host_hw_engine(&mxic
->external_engine
);
846 platform_set_drvdata(pdev
, mxic
);
851 static void mxic_ecc_remove(struct platform_device
*pdev
)
853 struct mxic_ecc_engine
*mxic
= platform_get_drvdata(pdev
);
855 nand_ecc_unregister_on_host_hw_engine(&mxic
->external_engine
);
858 static const struct of_device_id mxic_ecc_of_ids
[] = {
860 .compatible
= "mxicy,nand-ecc-engine-rev3",
864 MODULE_DEVICE_TABLE(of
, mxic_ecc_of_ids
);
866 static struct platform_driver mxic_ecc_driver
= {
868 .name
= "mxic-nand-ecc-engine",
869 .of_match_table
= mxic_ecc_of_ids
,
871 .probe
= mxic_ecc_probe
,
872 .remove
= mxic_ecc_remove
,
874 module_platform_driver(mxic_ecc_driver
);
/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");