Merge tag 'trace-v5.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
[linux/fpc-iii.git] / drivers / mtd / nand / ecc.c
blob6c43dfda01d4ddd7094414ebba8d39cebffdf327
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Generic Error-Correcting Code (ECC) engine
5 * Copyright (C) 2019 Macronix
6 * Author:
7 * Miquèl RAYNAL <miquel.raynal@bootlin.com>
10 * This file describes the abstraction of any NAND ECC engine. It has been
11 * designed to fit most cases, including parallel NANDs and SPI-NANDs.
13 * There are three main situations where instantiating this ECC engine makes
14 * sense:
15 * - external: The ECC engine is outside the NAND pipeline, typically this
16 *               is a software ECC engine, or a hardware engine that is
17 * outside the NAND controller pipeline.
18 * - pipelined: The ECC engine is inside the NAND pipeline, ie. on the
19 * controller's side. This is the case of most of the raw NAND
20 * controllers. In the pipeline case, the ECC bytes are
21 * generated/data corrected on the fly when a page is
22 * written/read.
23 * - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
24 * Some NAND chips can correct themselves the data.
26 * Besides the initial setup and final cleanups, the interfaces are rather
27 * simple:
28 * - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
29 * the I/O request type. In case of software correction or external
30 * engine, this step may involve to derive the ECC bytes and place
31 * them in the OOB area before a write.
32 * - finish: Finish an I/O request. Correct the data in case of a read
33 * request and report the number of corrected bits/uncorrectable
34 * errors. Most likely empty for write operations, unless you have
35 * hardware specific stuff to do, like shutting down the engine to
36 * save power.
38 * The I/O request should be enclosed in a prepare()/finish() pair of calls
39 * and will behave differently depending on the requested I/O type:
40 * - raw: Correction disabled
41 * - ecc: Correction enabled
43 * The request direction is impacting the logic as well:
44 * - read: Load data from the NAND chip
45 * - write: Store data in the NAND chip
47 * Mixing all these combinations together gives the following behavior.
48 * Those are just examples, drivers are free to add custom steps in their
49 * prepare/finish hook.
51 * [external ECC engine]
52 * - external + prepare + raw + read: do nothing
53 * - external + finish + raw + read: do nothing
54 * - external + prepare + raw + write: do nothing
55 * - external + finish + raw + write: do nothing
56 * - external + prepare + ecc + read: do nothing
57 * - external + finish + ecc + read: calculate expected ECC bytes, extract
58 * ECC bytes from OOB buffer, correct
59 * and report any bitflip/error
60 * - external + prepare + ecc + write: calculate ECC bytes and store them at
61 * the right place in the OOB buffer based
62 * on the OOB layout
63 * - external + finish + ecc + write: do nothing
65 * [pipelined ECC engine]
66 * - pipelined + prepare + raw + read: disable the controller's ECC engine if
67 * activated
68 * - pipelined + finish + raw + read: do nothing
69 * - pipelined + prepare + raw + write: disable the controller's ECC engine if
70 * activated
71 * - pipelined + finish + raw + write: do nothing
72 * - pipelined + prepare + ecc + read: enable the controller's ECC engine if
73 * deactivated
74 * - pipelined + finish + ecc + read: check the status, report any
75 * error/bitflip
76 * - pipelined + prepare + ecc + write: enable the controller's ECC engine if
77 * deactivated
78 * - pipelined + finish + ecc + write: do nothing
80 * [ondie ECC engine]
81 * - ondie + prepare + raw + read: send commands to disable the on-chip ECC
82 * engine if activated
83 * - ondie + finish + raw + read: do nothing
84 * - ondie + prepare + raw + write: send commands to disable the on-chip ECC
85 * engine if activated
86 * - ondie + finish + raw + write: do nothing
87 * - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
88 * engine if deactivated
89 * - ondie + finish + ecc + read: send commands to check the status, report
90 * any error/bitflip
91 * - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
92 * engine if deactivated
93 * - ondie + finish + ecc + write: do nothing
96 #include <linux/module.h>
97 #include <linux/mtd/nand.h>
98 #include <linux/slab.h>
101 * nand_ecc_init_ctx - Init the ECC engine context
102 * @nand: the NAND device
104 * On success, the caller is responsible of calling @nand_ecc_cleanup_ctx().
106 int nand_ecc_init_ctx(struct nand_device *nand)
108 if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx)
109 return 0;
111 return nand->ecc.engine->ops->init_ctx(nand);
113 EXPORT_SYMBOL(nand_ecc_init_ctx);
116 * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
117 * @nand: the NAND device
119 void nand_ecc_cleanup_ctx(struct nand_device *nand)
121 if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx)
122 nand->ecc.engine->ops->cleanup_ctx(nand);
124 EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
127 * nand_ecc_prepare_io_req - Prepare an I/O request
128 * @nand: the NAND device
129 * @req: the I/O request
131 int nand_ecc_prepare_io_req(struct nand_device *nand,
132 struct nand_page_io_req *req)
134 if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req)
135 return 0;
137 return nand->ecc.engine->ops->prepare_io_req(nand, req);
139 EXPORT_SYMBOL(nand_ecc_prepare_io_req);
142 * nand_ecc_finish_io_req - Finish an I/O request
143 * @nand: the NAND device
144 * @req: the I/O request
146 int nand_ecc_finish_io_req(struct nand_device *nand,
147 struct nand_page_io_req *req)
149 if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req)
150 return 0;
152 return nand->ecc.engine->ops->finish_io_req(nand, req);
154 EXPORT_SYMBOL(nand_ecc_finish_io_req);
156 /* Define default OOB placement schemes for large and small page devices */
157 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
158 struct mtd_oob_region *oobregion)
160 struct nand_device *nand = mtd_to_nanddev(mtd);
161 unsigned int total_ecc_bytes = nand->ecc.ctx.total;
163 if (section > 1)
164 return -ERANGE;
166 if (!section) {
167 oobregion->offset = 0;
168 if (mtd->oobsize == 16)
169 oobregion->length = 4;
170 else
171 oobregion->length = 3;
172 } else {
173 if (mtd->oobsize == 8)
174 return -ERANGE;
176 oobregion->offset = 6;
177 oobregion->length = total_ecc_bytes - 4;
180 return 0;
183 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
184 struct mtd_oob_region *oobregion)
186 if (section > 1)
187 return -ERANGE;
189 if (mtd->oobsize == 16) {
190 if (section)
191 return -ERANGE;
193 oobregion->length = 8;
194 oobregion->offset = 8;
195 } else {
196 oobregion->length = 2;
197 if (!section)
198 oobregion->offset = 3;
199 else
200 oobregion->offset = 6;
203 return 0;
/* Default OOB layout for small page devices (see the _sp helpers above) */
static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};

/**
 * nand_get_small_page_ooblayout - Default OOB layout for small page devices
 *
 * Returns a pointer to the shared, read-only small page layout operations.
 */
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
{
	return &nand_ooblayout_sp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);
217 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
218 struct mtd_oob_region *oobregion)
220 struct nand_device *nand = mtd_to_nanddev(mtd);
221 unsigned int total_ecc_bytes = nand->ecc.ctx.total;
223 if (section || !total_ecc_bytes)
224 return -ERANGE;
226 oobregion->length = total_ecc_bytes;
227 oobregion->offset = mtd->oobsize - oobregion->length;
229 return 0;
232 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
233 struct mtd_oob_region *oobregion)
235 struct nand_device *nand = mtd_to_nanddev(mtd);
236 unsigned int total_ecc_bytes = nand->ecc.ctx.total;
238 if (section)
239 return -ERANGE;
241 oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
242 oobregion->offset = 2;
244 return 0;
/* Default OOB layout for large page devices (see the _lp helpers above) */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};

/**
 * nand_get_large_page_ooblayout - Default OOB layout for large page devices
 *
 * Returns a pointer to the shared, read-only large page layout operations.
 */
const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
{
	return &nand_ooblayout_lp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
259 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
260 * are placed at a fixed offset.
262 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
263 struct mtd_oob_region *oobregion)
265 struct nand_device *nand = mtd_to_nanddev(mtd);
266 unsigned int total_ecc_bytes = nand->ecc.ctx.total;
268 if (section)
269 return -ERANGE;
271 switch (mtd->oobsize) {
272 case 64:
273 oobregion->offset = 40;
274 break;
275 case 128:
276 oobregion->offset = 80;
277 break;
278 default:
279 return -EINVAL;
282 oobregion->length = total_ecc_bytes;
283 if (oobregion->offset + oobregion->length > mtd->oobsize)
284 return -ERANGE;
286 return 0;
289 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
290 struct mtd_oob_region *oobregion)
292 struct nand_device *nand = mtd_to_nanddev(mtd);
293 unsigned int total_ecc_bytes = nand->ecc.ctx.total;
294 int ecc_offset = 0;
296 if (section < 0 || section > 1)
297 return -ERANGE;
299 switch (mtd->oobsize) {
300 case 64:
301 ecc_offset = 40;
302 break;
303 case 128:
304 ecc_offset = 80;
305 break;
306 default:
307 return -EINVAL;
310 if (section == 0) {
311 oobregion->offset = 2;
312 oobregion->length = ecc_offset - 2;
313 } else {
314 oobregion->offset = ecc_offset + total_ecc_bytes;
315 oobregion->length = mtd->oobsize - oobregion->offset;
318 return 0;
/* Legacy large page layout with ECC at a fixed offset (1-bit Hamming) */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};

/**
 * nand_get_large_page_hamming_ooblayout - Legacy Hamming large page layout
 *
 * Returns a pointer to the shared, read-only legacy Hamming layout operations.
 */
const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
{
	return &nand_ooblayout_lp_hamming_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);
/*
 * Derive the ECC engine type from the device tree node: explicit "no engine"
 * and "software engine" booleans take precedence, otherwise the
 * "nand-ecc-engine" phandle selects on-die (self-reference) or on-host ECC.
 */
static enum nand_ecc_engine_type
of_get_nand_ecc_engine_type(struct device_node *np)
{
	struct device_node *eng_np;

	if (of_property_read_bool(np, "nand-no-ecc-engine"))
		return NAND_ECC_ENGINE_TYPE_NONE;

	if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
		return NAND_ECC_ENGINE_TYPE_SOFT;

	eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
	/*
	 * The reference can be dropped right away: eng_np is only compared
	 * by pointer value below, never dereferenced.
	 */
	of_node_put(eng_np);

	if (eng_np) {
		/* A node pointing at itself means the chip corrects itself. */
		if (eng_np == np)
			return NAND_ECC_ENGINE_TYPE_ON_DIE;
		else
			return NAND_ECC_ENGINE_TYPE_ON_HOST;
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}
/* DT string values accepted for the "nand-ecc-placement" property */
static const char * const nand_ecc_placement[] = {
	[NAND_ECC_PLACEMENT_OOB] = "oob",
	[NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
};
361 static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
363 enum nand_ecc_placement placement;
364 const char *pm;
365 int err;
367 err = of_property_read_string(np, "nand-ecc-placement", &pm);
368 if (!err) {
369 for (placement = NAND_ECC_PLACEMENT_OOB;
370 placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
371 if (!strcasecmp(pm, nand_ecc_placement[placement]))
372 return placement;
376 return NAND_ECC_PLACEMENT_UNKNOWN;
/* DT string values accepted for the "nand-ecc-algo" property */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_ALGO_HAMMING] = "hamming",
	[NAND_ECC_ALGO_BCH] = "bch",
	[NAND_ECC_ALGO_RS] = "rs",
};
385 static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
387 enum nand_ecc_algo ecc_algo;
388 const char *pm;
389 int err;
391 err = of_property_read_string(np, "nand-ecc-algo", &pm);
392 if (!err) {
393 for (ecc_algo = NAND_ECC_ALGO_HAMMING;
394 ecc_algo < ARRAY_SIZE(nand_ecc_algos);
395 ecc_algo++) {
396 if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
397 return ecc_algo;
401 return NAND_ECC_ALGO_UNKNOWN;
404 static int of_get_nand_ecc_step_size(struct device_node *np)
406 int ret;
407 u32 val;
409 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
410 return ret ? ret : val;
413 static int of_get_nand_ecc_strength(struct device_node *np)
415 int ret;
416 u32 val;
418 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
419 return ret ? ret : val;
/**
 * of_get_nand_ecc_user_config - Extract the user ECC configuration from DT
 * @nand: the NAND device
 *
 * Fills nand->ecc.user_conf from the device tree node. Properties that are
 * absent leave the corresponding user_conf fields untouched.
 */
void of_get_nand_ecc_user_config(struct nand_device *nand)
{
	struct device_node *dn = nanddev_get_of_node(nand);
	int strength, size;

	nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
	nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
	nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);

	/* Negative values are property-read errors: keep the default. */
	strength = of_get_nand_ecc_strength(dn);
	if (strength >= 0)
		nand->ecc.user_conf.strength = strength;

	size = of_get_nand_ecc_step_size(dn);
	if (size >= 0)
		nand->ecc.user_conf.step_size = size;

	if (of_property_read_bool(dn, "nand-ecc-maximize"))
		nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
}
EXPORT_SYMBOL(of_get_nand_ecc_user_config);
445 * nand_ecc_is_strong_enough - Check if the chip configuration meets the
446 * datasheet requirements.
448 * @nand: Device to check
450 * If our configuration corrects A bits per B bytes and the minimum
451 * required correction level is X bits per Y bytes, then we must ensure
452 * both of the following are true:
454 * (1) A / B >= X / Y
455 * (2) A >= X
457 * Requirement (1) ensures we can correct for the required bitflip density.
458 * Requirement (2) ensures we can correct even when all bitflips are clumped
459 * in the same sector.
461 bool nand_ecc_is_strong_enough(struct nand_device *nand)
463 const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
464 const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
465 struct mtd_info *mtd = nanddev_to_mtd(nand);
466 int corr, ds_corr;
468 if (conf->step_size == 0 || reqs->step_size == 0)
469 /* Not enough information */
470 return true;
473 * We get the number of corrected bits per page to compare
474 * the correction density.
476 corr = (mtd->writesize * conf->strength) / conf->step_size;
477 ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;
479 return corr >= ds_corr && conf->strength >= reqs->strength;
481 EXPORT_SYMBOL(nand_ecc_is_strong_enough);
483 /* ECC engine driver internal helpers */
484 int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
485 struct nand_device *nand)
487 unsigned int total_buffer_size;
489 ctx->nand = nand;
491 /* Let the user decide the exact length of each buffer */
492 if (!ctx->page_buffer_size)
493 ctx->page_buffer_size = nanddev_page_size(nand);
494 if (!ctx->oob_buffer_size)
495 ctx->oob_buffer_size = nanddev_per_page_oobsize(nand);
497 total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size;
499 ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL);
500 if (!ctx->spare_databuf)
501 return -ENOMEM;
503 ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size;
505 return 0;
507 EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking);
/**
 * nand_ecc_cleanup_req_tweaking - Release the request-tweaking bounce buffers
 * @ctx: the request tweaking context
 */
void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx)
{
	/* spare_oobbuf points inside the same allocation: one kfree frees both */
	kfree(ctx->spare_databuf);
}
EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking);
/*
 * Ensure data and OOB area is fully read/written otherwise the correction might
 * not work as expected.
 */
void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
			struct nand_page_io_req *req)
{
	struct nand_device *nand = ctx->nand;
	struct nand_page_io_req *orig, *tweak;

	/*
	 * Save the original request before touching it: @req is modified in
	 * place below, and nand_ecc_restore_req() needs the pristine copy.
	 */
	ctx->orig_req = *req;
	ctx->bounce_data = false;
	ctx->bounce_oob = false;
	orig = &ctx->orig_req;
	tweak = req;

	/* Ensure the request covers the entire page */
	if (orig->datalen < nanddev_page_size(nand)) {
		ctx->bounce_data = true;
		tweak->dataoffs = 0;
		tweak->datalen = nanddev_page_size(nand);
		tweak->databuf.in = ctx->spare_databuf;
		/* 0xFF: erased-flash pattern, neutral for the padded bytes */
		memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size);
	}

	/* Same logic for the OOB area */
	if (orig->ooblen < nanddev_per_page_oobsize(nand)) {
		ctx->bounce_oob = true;
		tweak->ooboffs = 0;
		tweak->ooblen = nanddev_per_page_oobsize(nand);
		tweak->oobbuf.in = ctx->spare_oobbuf;
		memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
	}

	/* Copy the data that must be written in the bounce buffers, if needed */
	if (orig->type == NAND_PAGE_WRITE) {
		if (ctx->bounce_data)
			memcpy((void *)tweak->databuf.out + orig->dataoffs,
			       orig->databuf.out, orig->datalen);

		if (ctx->bounce_oob)
			memcpy((void *)tweak->oobbuf.out + orig->ooboffs,
			       orig->oobbuf.out, orig->ooblen);
	}
}
EXPORT_SYMBOL_GPL(nand_ecc_tweak_req);
/*
 * Undo nand_ecc_tweak_req(): copy the interesting bytes out of the bounce
 * buffers (read path) and put the original request back in place.
 */
void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
			  struct nand_page_io_req *req)
{
	struct nand_page_io_req *orig, *tweak;

	orig = &ctx->orig_req;
	tweak = req;

	/* Restore the data read from the bounce buffers, if needed */
	if (orig->type == NAND_PAGE_READ) {
		if (ctx->bounce_data)
			memcpy(orig->databuf.in,
			       tweak->databuf.in + orig->dataoffs,
			       orig->datalen);

		if (ctx->bounce_oob)
			memcpy(orig->oobbuf.in,
			       tweak->oobbuf.in + orig->ooboffs,
			       orig->ooblen);
	}

	/* Ensure the original request is restored */
	*req = *orig;
}
EXPORT_SYMBOL_GPL(nand_ecc_restore_req);
588 struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand)
590 unsigned int algo = nand->ecc.user_conf.algo;
592 if (algo == NAND_ECC_ALGO_UNKNOWN)
593 algo = nand->ecc.defaults.algo;
595 switch (algo) {
596 case NAND_ECC_ALGO_HAMMING:
597 return nand_ecc_sw_hamming_get_engine();
598 case NAND_ECC_ALGO_BCH:
599 return nand_ecc_sw_bch_get_engine();
600 default:
601 break;
604 return NULL;
606 EXPORT_SYMBOL(nand_ecc_get_sw_engine);
/**
 * nand_ecc_get_on_die_hw_engine - Retrieve the on-die ECC engine
 * @nand: the NAND device
 *
 * Returns the on-die engine registered by the chip driver, or NULL if none.
 */
struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand)
{
	return nand->ecc.ondie_engine;
}
EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine);
/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Generic ECC engine");