// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic Error-Correcting Code (ECC) engine
 *
 * Copyright (C) 2019 Macronix
 * Author:
 *	Miquèl RAYNAL <miquel.raynal@bootlin.com>
 *
 * This file describes the abstraction of any NAND ECC engine. It has been
 * designed to fit most cases, including parallel NANDs and SPI-NANDs.
 *
 * There are three main situations where instantiating this ECC engine makes
 * sense:
 *   - external: The ECC engine is outside the NAND pipeline, typically this
 *               is a software ECC engine, or a hardware engine that is
 *               outside the NAND controller pipeline.
 *   - pipelined: The ECC engine is inside the NAND pipeline, ie. on the
 *                controller's side. This is the case of most of the raw NAND
 *                controllers. In the pipeline case, the ECC bytes are
 *                generated/data corrected on the fly when a page is
 *                written/read.
 *   - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
 *            Some NAND chips can correct themselves the data.
 *
 * Besides the initial setup and final cleanups, the interfaces are rather
 * simple:
 *   - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
 *              the I/O request type. In case of software correction or external
 *              engine, this step may involve to derive the ECC bytes and place
 *              them in the OOB area before a write.
 *   - finish: Finish an I/O request. Correct the data in case of a read
 *             request and report the number of corrected bits/uncorrectable
 *             errors. Most likely empty for write operations, unless you have
 *             hardware specific stuff to do, like shutting down the engine to
 *             save power.
 *
 * The I/O request should be enclosed in a prepare()/finish() pair of calls
 * and will behave differently depending on the requested I/O type:
 *   - raw: Correction disabled
 *   - ecc: Correction enabled
 *
 * The request direction is impacting the logic as well:
 *   - read: Load data from the NAND chip
 *   - write: Store data in the NAND chip
 *
 * Mixing all these combinations together gives the following behavior.
 * Those are just examples, drivers are free to add custom steps in their
 * prepare/finish hook.
 *
 * [external ECC engine]
 *   - external + prepare + raw + read: do nothing
 *   - external + finish + raw + read: do nothing
 *   - external + prepare + raw + write: do nothing
 *   - external + finish + raw + write: do nothing
 *   - external + prepare + ecc + read: do nothing
 *   - external + finish + ecc + read: calculate expected ECC bytes, extract
 *                                     ECC bytes from OOB buffer, correct
 *                                     and report any bitflip/error
 *   - external + prepare + ecc + write: calculate ECC bytes and store them at
 *                                       the right place in the OOB buffer based
 *                                       on the OOB layout
 *   - external + finish + ecc + write: do nothing
 *
 * [pipelined ECC engine]
 *   - pipelined + prepare + raw + read: disable the controller's ECC engine if
 *                                       activated
 *   - pipelined + finish + raw + read: do nothing
 *   - pipelined + prepare + raw + write: disable the controller's ECC engine if
 *                                        activated
 *   - pipelined + finish + raw + write: do nothing
 *   - pipelined + prepare + ecc + read: enable the controller's ECC engine if
 *                                       deactivated
 *   - pipelined + finish + ecc + read: check the status, report any
 *                                      bitflip/error
 *   - pipelined + prepare + ecc + write: enable the controller's ECC engine if
 *                                        deactivated
 *   - pipelined + finish + ecc + write: do nothing
 *
 * [ondie ECC engine]
 *   - ondie + prepare + raw + read: send commands to disable the on-chip ECC
 *                                   engine if activated
 *   - ondie + finish + raw + read: do nothing
 *   - ondie + prepare + raw + write: send commands to disable the on-chip ECC
 *                                    engine if activated
 *   - ondie + finish + raw + write: do nothing
 *   - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
 *                                   engine if deactivated
 *   - ondie + finish + ecc + read: send commands to check the status, report
 *                                  any bitflip/error
 *   - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
 *                                    engine if deactivated
 *   - ondie + finish + ecc + write: do nothing
 */
96 #include <linux/module.h>
97 #include <linux/mtd/nand.h>
98 #include <linux/platform_device.h>
99 #include <linux/slab.h>
100 #include <linux/of.h>
101 #include <linux/of_platform.h>
/*
 * Registry of all on-host hardware ECC engines that drivers have made
 * available, protected by the mutex below.
 */
static LIST_HEAD(on_host_hw_engines);
static DEFINE_MUTEX(on_host_hw_engines_mutex);
107 * nand_ecc_init_ctx - Init the ECC engine context
108 * @nand: the NAND device
110 * On success, the caller is responsible of calling @nand_ecc_cleanup_ctx().
112 int nand_ecc_init_ctx(struct nand_device
*nand
)
114 if (!nand
->ecc
.engine
|| !nand
->ecc
.engine
->ops
->init_ctx
)
117 return nand
->ecc
.engine
->ops
->init_ctx(nand
);
119 EXPORT_SYMBOL(nand_ecc_init_ctx
);
122 * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
123 * @nand: the NAND device
125 void nand_ecc_cleanup_ctx(struct nand_device
*nand
)
127 if (nand
->ecc
.engine
&& nand
->ecc
.engine
->ops
->cleanup_ctx
)
128 nand
->ecc
.engine
->ops
->cleanup_ctx(nand
);
130 EXPORT_SYMBOL(nand_ecc_cleanup_ctx
);
133 * nand_ecc_prepare_io_req - Prepare an I/O request
134 * @nand: the NAND device
135 * @req: the I/O request
137 int nand_ecc_prepare_io_req(struct nand_device
*nand
,
138 struct nand_page_io_req
*req
)
140 if (!nand
->ecc
.engine
|| !nand
->ecc
.engine
->ops
->prepare_io_req
)
143 return nand
->ecc
.engine
->ops
->prepare_io_req(nand
, req
);
145 EXPORT_SYMBOL(nand_ecc_prepare_io_req
);
148 * nand_ecc_finish_io_req - Finish an I/O request
149 * @nand: the NAND device
150 * @req: the I/O request
152 int nand_ecc_finish_io_req(struct nand_device
*nand
,
153 struct nand_page_io_req
*req
)
155 if (!nand
->ecc
.engine
|| !nand
->ecc
.engine
->ops
->finish_io_req
)
158 return nand
->ecc
.engine
->ops
->finish_io_req(nand
, req
);
160 EXPORT_SYMBOL(nand_ecc_finish_io_req
);
162 /* Define default OOB placement schemes for large and small page devices */
163 static int nand_ooblayout_ecc_sp(struct mtd_info
*mtd
, int section
,
164 struct mtd_oob_region
*oobregion
)
166 struct nand_device
*nand
= mtd_to_nanddev(mtd
);
167 unsigned int total_ecc_bytes
= nand
->ecc
.ctx
.total
;
173 oobregion
->offset
= 0;
174 if (mtd
->oobsize
== 16)
175 oobregion
->length
= 4;
177 oobregion
->length
= 3;
179 if (mtd
->oobsize
== 8)
182 oobregion
->offset
= 6;
183 oobregion
->length
= total_ecc_bytes
- 4;
189 static int nand_ooblayout_free_sp(struct mtd_info
*mtd
, int section
,
190 struct mtd_oob_region
*oobregion
)
195 if (mtd
->oobsize
== 16) {
199 oobregion
->length
= 8;
200 oobregion
->offset
= 8;
202 oobregion
->length
= 2;
204 oobregion
->offset
= 3;
206 oobregion
->offset
= 6;
212 static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops
= {
213 .ecc
= nand_ooblayout_ecc_sp
,
214 .free
= nand_ooblayout_free_sp
,
217 const struct mtd_ooblayout_ops
*nand_get_small_page_ooblayout(void)
219 return &nand_ooblayout_sp_ops
;
221 EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout
);
223 static int nand_ooblayout_ecc_lp(struct mtd_info
*mtd
, int section
,
224 struct mtd_oob_region
*oobregion
)
226 struct nand_device
*nand
= mtd_to_nanddev(mtd
);
227 unsigned int total_ecc_bytes
= nand
->ecc
.ctx
.total
;
229 if (section
|| !total_ecc_bytes
)
232 oobregion
->length
= total_ecc_bytes
;
233 oobregion
->offset
= mtd
->oobsize
- oobregion
->length
;
238 static int nand_ooblayout_free_lp(struct mtd_info
*mtd
, int section
,
239 struct mtd_oob_region
*oobregion
)
241 struct nand_device
*nand
= mtd_to_nanddev(mtd
);
242 unsigned int total_ecc_bytes
= nand
->ecc
.ctx
.total
;
247 oobregion
->length
= mtd
->oobsize
- total_ecc_bytes
- 2;
248 oobregion
->offset
= 2;
253 static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops
= {
254 .ecc
= nand_ooblayout_ecc_lp
,
255 .free
= nand_ooblayout_free_lp
,
258 const struct mtd_ooblayout_ops
*nand_get_large_page_ooblayout(void)
260 return &nand_ooblayout_lp_ops
;
262 EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout
);
265 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
266 * are placed at a fixed offset.
268 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info
*mtd
, int section
,
269 struct mtd_oob_region
*oobregion
)
271 struct nand_device
*nand
= mtd_to_nanddev(mtd
);
272 unsigned int total_ecc_bytes
= nand
->ecc
.ctx
.total
;
277 switch (mtd
->oobsize
) {
279 oobregion
->offset
= 40;
282 oobregion
->offset
= 80;
288 oobregion
->length
= total_ecc_bytes
;
289 if (oobregion
->offset
+ oobregion
->length
> mtd
->oobsize
)
295 static int nand_ooblayout_free_lp_hamming(struct mtd_info
*mtd
, int section
,
296 struct mtd_oob_region
*oobregion
)
298 struct nand_device
*nand
= mtd_to_nanddev(mtd
);
299 unsigned int total_ecc_bytes
= nand
->ecc
.ctx
.total
;
302 if (section
< 0 || section
> 1)
305 switch (mtd
->oobsize
) {
317 oobregion
->offset
= 2;
318 oobregion
->length
= ecc_offset
- 2;
320 oobregion
->offset
= ecc_offset
+ total_ecc_bytes
;
321 oobregion
->length
= mtd
->oobsize
- oobregion
->offset
;
327 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops
= {
328 .ecc
= nand_ooblayout_ecc_lp_hamming
,
329 .free
= nand_ooblayout_free_lp_hamming
,
332 const struct mtd_ooblayout_ops
*nand_get_large_page_hamming_ooblayout(void)
334 return &nand_ooblayout_lp_hamming_ops
;
336 EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout
);
338 static enum nand_ecc_engine_type
339 of_get_nand_ecc_engine_type(struct device_node
*np
)
341 struct device_node
*eng_np
;
343 if (of_property_read_bool(np
, "nand-no-ecc-engine"))
344 return NAND_ECC_ENGINE_TYPE_NONE
;
346 if (of_property_read_bool(np
, "nand-use-soft-ecc-engine"))
347 return NAND_ECC_ENGINE_TYPE_SOFT
;
349 eng_np
= of_parse_phandle(np
, "nand-ecc-engine", 0);
354 return NAND_ECC_ENGINE_TYPE_ON_DIE
;
356 return NAND_ECC_ENGINE_TYPE_ON_HOST
;
359 return NAND_ECC_ENGINE_TYPE_INVALID
;
362 static const char * const nand_ecc_placement
[] = {
363 [NAND_ECC_PLACEMENT_OOB
] = "oob",
364 [NAND_ECC_PLACEMENT_INTERLEAVED
] = "interleaved",
367 static enum nand_ecc_placement
of_get_nand_ecc_placement(struct device_node
*np
)
369 enum nand_ecc_placement placement
;
373 err
= of_property_read_string(np
, "nand-ecc-placement", &pm
);
375 for (placement
= NAND_ECC_PLACEMENT_OOB
;
376 placement
< ARRAY_SIZE(nand_ecc_placement
); placement
++) {
377 if (!strcasecmp(pm
, nand_ecc_placement
[placement
]))
382 return NAND_ECC_PLACEMENT_UNKNOWN
;
385 static const char * const nand_ecc_algos
[] = {
386 [NAND_ECC_ALGO_HAMMING
] = "hamming",
387 [NAND_ECC_ALGO_BCH
] = "bch",
388 [NAND_ECC_ALGO_RS
] = "rs",
391 static enum nand_ecc_algo
of_get_nand_ecc_algo(struct device_node
*np
)
393 enum nand_ecc_algo ecc_algo
;
397 err
= of_property_read_string(np
, "nand-ecc-algo", &pm
);
399 for (ecc_algo
= NAND_ECC_ALGO_HAMMING
;
400 ecc_algo
< ARRAY_SIZE(nand_ecc_algos
);
402 if (!strcasecmp(pm
, nand_ecc_algos
[ecc_algo
]))
407 return NAND_ECC_ALGO_UNKNOWN
;
410 static int of_get_nand_ecc_step_size(struct device_node
*np
)
415 ret
= of_property_read_u32(np
, "nand-ecc-step-size", &val
);
416 return ret
? ret
: val
;
419 static int of_get_nand_ecc_strength(struct device_node
*np
)
424 ret
= of_property_read_u32(np
, "nand-ecc-strength", &val
);
425 return ret
? ret
: val
;
428 void of_get_nand_ecc_user_config(struct nand_device
*nand
)
430 struct device_node
*dn
= nanddev_get_of_node(nand
);
433 nand
->ecc
.user_conf
.engine_type
= of_get_nand_ecc_engine_type(dn
);
434 nand
->ecc
.user_conf
.algo
= of_get_nand_ecc_algo(dn
);
435 nand
->ecc
.user_conf
.placement
= of_get_nand_ecc_placement(dn
);
437 strength
= of_get_nand_ecc_strength(dn
);
439 nand
->ecc
.user_conf
.strength
= strength
;
441 size
= of_get_nand_ecc_step_size(dn
);
443 nand
->ecc
.user_conf
.step_size
= size
;
445 if (of_property_read_bool(dn
, "nand-ecc-maximize"))
446 nand
->ecc
.user_conf
.flags
|= NAND_ECC_MAXIMIZE_STRENGTH
;
448 EXPORT_SYMBOL(of_get_nand_ecc_user_config
);
451 * nand_ecc_is_strong_enough - Check if the chip configuration meets the
452 * datasheet requirements.
454 * @nand: Device to check
456 * If our configuration corrects A bits per B bytes and the minimum
457 * required correction level is X bits per Y bytes, then we must ensure
458 * both of the following are true:
463 * Requirement (1) ensures we can correct for the required bitflip density.
464 * Requirement (2) ensures we can correct even when all bitflips are clumped
465 * in the same sector.
467 bool nand_ecc_is_strong_enough(struct nand_device
*nand
)
469 const struct nand_ecc_props
*reqs
= nanddev_get_ecc_requirements(nand
);
470 const struct nand_ecc_props
*conf
= nanddev_get_ecc_conf(nand
);
471 struct mtd_info
*mtd
= nanddev_to_mtd(nand
);
474 if (conf
->step_size
== 0 || reqs
->step_size
== 0)
475 /* Not enough information */
479 * We get the number of corrected bits per page to compare
480 * the correction density.
482 corr
= (mtd
->writesize
* conf
->strength
) / conf
->step_size
;
483 ds_corr
= (mtd
->writesize
* reqs
->strength
) / reqs
->step_size
;
485 return corr
>= ds_corr
&& conf
->strength
>= reqs
->strength
;
487 EXPORT_SYMBOL(nand_ecc_is_strong_enough
);
489 /* ECC engine driver internal helpers */
490 int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx
*ctx
,
491 struct nand_device
*nand
)
493 unsigned int total_buffer_size
;
497 /* Let the user decide the exact length of each buffer */
498 if (!ctx
->page_buffer_size
)
499 ctx
->page_buffer_size
= nanddev_page_size(nand
);
500 if (!ctx
->oob_buffer_size
)
501 ctx
->oob_buffer_size
= nanddev_per_page_oobsize(nand
);
503 total_buffer_size
= ctx
->page_buffer_size
+ ctx
->oob_buffer_size
;
505 ctx
->spare_databuf
= kzalloc(total_buffer_size
, GFP_KERNEL
);
506 if (!ctx
->spare_databuf
)
509 ctx
->spare_oobbuf
= ctx
->spare_databuf
+ ctx
->page_buffer_size
;
513 EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking
);
515 void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx
*ctx
)
517 kfree(ctx
->spare_databuf
);
519 EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking
);
522 * Ensure data and OOB area is fully read/written otherwise the correction might
523 * not work as expected.
525 void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx
*ctx
,
526 struct nand_page_io_req
*req
)
528 struct nand_device
*nand
= ctx
->nand
;
529 struct nand_page_io_req
*orig
, *tweak
;
531 /* Save the original request */
532 ctx
->orig_req
= *req
;
533 ctx
->bounce_data
= false;
534 ctx
->bounce_oob
= false;
535 orig
= &ctx
->orig_req
;
538 /* Ensure the request covers the entire page */
539 if (orig
->datalen
< nanddev_page_size(nand
)) {
540 ctx
->bounce_data
= true;
542 tweak
->datalen
= nanddev_page_size(nand
);
543 tweak
->databuf
.in
= ctx
->spare_databuf
;
544 memset(tweak
->databuf
.in
, 0xFF, ctx
->page_buffer_size
);
547 if (orig
->ooblen
< nanddev_per_page_oobsize(nand
)) {
548 ctx
->bounce_oob
= true;
550 tweak
->ooblen
= nanddev_per_page_oobsize(nand
);
551 tweak
->oobbuf
.in
= ctx
->spare_oobbuf
;
552 memset(tweak
->oobbuf
.in
, 0xFF, ctx
->oob_buffer_size
);
555 /* Copy the data that must be writen in the bounce buffers, if needed */
556 if (orig
->type
== NAND_PAGE_WRITE
) {
557 if (ctx
->bounce_data
)
558 memcpy((void *)tweak
->databuf
.out
+ orig
->dataoffs
,
559 orig
->databuf
.out
, orig
->datalen
);
562 memcpy((void *)tweak
->oobbuf
.out
+ orig
->ooboffs
,
563 orig
->oobbuf
.out
, orig
->ooblen
);
566 EXPORT_SYMBOL_GPL(nand_ecc_tweak_req
);
568 void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx
*ctx
,
569 struct nand_page_io_req
*req
)
571 struct nand_page_io_req
*orig
, *tweak
;
573 orig
= &ctx
->orig_req
;
576 /* Restore the data read from the bounce buffers, if needed */
577 if (orig
->type
== NAND_PAGE_READ
) {
578 if (ctx
->bounce_data
)
579 memcpy(orig
->databuf
.in
,
580 tweak
->databuf
.in
+ orig
->dataoffs
,
584 memcpy(orig
->oobbuf
.in
,
585 tweak
->oobbuf
.in
+ orig
->ooboffs
,
589 /* Ensure the original request is restored */
592 EXPORT_SYMBOL_GPL(nand_ecc_restore_req
);
594 struct nand_ecc_engine
*nand_ecc_get_sw_engine(struct nand_device
*nand
)
596 unsigned int algo
= nand
->ecc
.user_conf
.algo
;
598 if (algo
== NAND_ECC_ALGO_UNKNOWN
)
599 algo
= nand
->ecc
.defaults
.algo
;
602 case NAND_ECC_ALGO_HAMMING
:
603 return nand_ecc_sw_hamming_get_engine();
604 case NAND_ECC_ALGO_BCH
:
605 return nand_ecc_sw_bch_get_engine();
612 EXPORT_SYMBOL(nand_ecc_get_sw_engine
);
614 struct nand_ecc_engine
*nand_ecc_get_on_die_hw_engine(struct nand_device
*nand
)
616 return nand
->ecc
.ondie_engine
;
618 EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine
);
620 int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine
*engine
)
622 struct nand_ecc_engine
*item
;
627 /* Prevent multiple registrations of one engine */
628 list_for_each_entry(item
, &on_host_hw_engines
, node
)
632 mutex_lock(&on_host_hw_engines_mutex
);
633 list_add_tail(&engine
->node
, &on_host_hw_engines
);
634 mutex_unlock(&on_host_hw_engines_mutex
);
638 EXPORT_SYMBOL(nand_ecc_register_on_host_hw_engine
);
640 int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine
*engine
)
645 mutex_lock(&on_host_hw_engines_mutex
);
646 list_del(&engine
->node
);
647 mutex_unlock(&on_host_hw_engines_mutex
);
651 EXPORT_SYMBOL(nand_ecc_unregister_on_host_hw_engine
);
653 static struct nand_ecc_engine
*nand_ecc_match_on_host_hw_engine(struct device
*dev
)
655 struct nand_ecc_engine
*item
;
657 list_for_each_entry(item
, &on_host_hw_engines
, node
)
658 if (item
->dev
== dev
)
664 struct nand_ecc_engine
*nand_ecc_get_on_host_hw_engine(struct nand_device
*nand
)
666 struct nand_ecc_engine
*engine
= NULL
;
667 struct device
*dev
= &nand
->mtd
.dev
;
668 struct platform_device
*pdev
;
669 struct device_node
*np
;
671 if (list_empty(&on_host_hw_engines
))
674 /* Check for an explicit nand-ecc-engine property */
675 np
= of_parse_phandle(dev
->of_node
, "nand-ecc-engine", 0);
677 pdev
= of_find_device_by_node(np
);
679 return ERR_PTR(-EPROBE_DEFER
);
681 engine
= nand_ecc_match_on_host_hw_engine(&pdev
->dev
);
682 platform_device_put(pdev
);
686 return ERR_PTR(-EPROBE_DEFER
);
690 get_device(engine
->dev
);
694 EXPORT_SYMBOL(nand_ecc_get_on_host_hw_engine
);
696 void nand_ecc_put_on_host_hw_engine(struct nand_device
*nand
)
698 put_device(nand
->ecc
.engine
->dev
);
700 EXPORT_SYMBOL(nand_ecc_put_on_host_hw_engine
);
703 * In the case of a pipelined engine, the device registering the ECC
704 * engine is not necessarily the ECC engine itself but may be a host controller.
705 * It is then useful to provide a helper to retrieve the right device object
706 * which actually represents the ECC engine.
708 struct device
*nand_ecc_get_engine_dev(struct device
*host
)
710 struct platform_device
*ecc_pdev
;
711 struct device_node
*np
;
714 * If the device node contains this property, it means we need to follow
715 * it in order to get the right ECC engine device we are looking for.
717 np
= of_parse_phandle(host
->of_node
, "nand-ecc-engine", 0);
721 ecc_pdev
= of_find_device_by_node(np
);
727 platform_device_put(ecc_pdev
);
730 return &ecc_pdev
->dev
;
733 MODULE_LICENSE("GPL");
734 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
735 MODULE_DESCRIPTION("Generic ECC engine");