1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains an ECC algorithm that detects and corrects 1 bit
 * errors in a 256 byte block of data.
 *
 * Copyright © 2008 Koninklijke Philips Electronics NV.
 * Author: Frans Meulenbroeks
 *
 * Completely replaces the previous ECC implementation which was written by:
 *   Steven J. Hill (sjhill@realitydiluted.com)
 *   Thomas Gleixner (tglx@linutronix.de)
 *
 * Information on how this algorithm works and how it was developed
 * can be found in Documentation/driver-api/mtd/nand_ecc.rst
 */
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/mtd/nand-ecc-sw-hamming.h>
22 #include <linux/slab.h>
23 #include <asm/byteorder.h>
/*
 * invparity is a 256 byte table that contains the odd parity
 * for each byte. So if the number of bits in a byte is even,
 * the array element is 1, and when the number of bits is odd
 * the array element is 0.
 */
static const char invparity[256] = {
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
/*
 * bitsperbyte contains the number of bits per byte
 * this is only used for testing and repairing parity
 * (a precalculated value slightly improves performance)
 */
static const char bitsperbyte[256] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
/*
 * addressbits is a lookup table to filter out the bits from the xor-ed
 * ECC data that identify the faulty location.
 * this is only used for repairing parity
 * see the comments in nand_ecc_sw_hamming_correct for more details
 */
static const char addressbits[256] = {
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};
/*
 * Odd parity of a byte: 1 when @x has an even number of set bits, 0 when
 * odd (same convention as the invparity[] lookup table in this file).
 */
static inline unsigned char ecc_oddparity8(unsigned char x)
{
	x ^= x >> 4;
	x ^= x >> 2;
	x ^= x >> 1;
	return (~x) & 1;
}

/*
 * ecc_sw_hamming_calculate - Calculate 3-byte ECC for a 256/512-byte block
 * @buf: input buffer with raw data
 * @step_size: number of data bytes covered by one ECC triplet (256 or 512)
 * @code: output buffer receiving the 3 ECC bytes
 * @sm_order: Smart Media byte ordering (swaps the two row-parity bytes)
 *
 * Implements the classic NAND Hamming code described in
 * Documentation/driver-api/mtd/nand_ecc.rst:
 * - rp(2k)   = parity over all bytes whose address bit k is 0
 * - rp(2k+1) = parity over all bytes whose address bit k is 1
 * - cp0..cp5 = column parities over bit groups within each byte
 * All stored bits are odd parity (complemented), so an erased (all 0xff)
 * block yields the all-0xff ECC.
 *
 * NOTE(review): this is a straightforward reference implementation; the
 * historical version was a heavily unrolled word-at-a-time loop. Restore
 * the unrolled variant if profiling shows this path is hot.
 *
 * Always returns 0.
 */
int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size,
			     unsigned char *code, bool sm_order)
{
	const unsigned int eccsize_mult = (step_size == 256) ? 1 : 2;
	unsigned char rp[18] = { 0 };	/* accumulated row parities rp0..rp17 */
	unsigned char par = 0;		/* accumulated column parity */
	unsigned char lp0 = 0, lp1 = 0;	/* packed row-parity ECC bytes */
	unsigned int i, k;

	for (i = 0; i < step_size; i++) {
		unsigned char cur = buf[i];

		par ^= cur;
		/*
		 * Fold the byte into rp(2k) or rp(2k+1) depending on bit k
		 * of its address; bit 8 (rp16/rp17) only matters for 512
		 * byte steps but is harmless to accumulate for 256.
		 */
		for (k = 0; k < 9; k++)
			rp[2 * k + ((i >> k) & 1)] ^= cur;
	}

	/* Reduce each accumulated byte to a single odd-parity bit */
	for (k = 0; k < 8; k++) {
		lp0 |= ecc_oddparity8(rp[k]) << k;
		lp1 |= ecc_oddparity8(rp[k + 8]) << k;
	}

	/* Smart Media order stores rp7..rp0 first, rp15..rp8 second */
	if (sm_order) {
		code[0] = lp0;
		code[1] = lp1;
	} else {
		code[0] = lp1;
		code[1] = lp0;
	}

	/* Third byte: column parities cp5..cp0 in bits 7..2 */
	code[2] = (ecc_oddparity8(par & 0xf0) << 7) |
		  (ecc_oddparity8(par & 0x0f) << 6) |
		  (ecc_oddparity8(par & 0xcc) << 5) |
		  (ecc_oddparity8(par & 0x33) << 4) |
		  (ecc_oddparity8(par & 0xaa) << 3) |
		  (ecc_oddparity8(par & 0x55) << 2);

	/* Bits 1..0: rp17/rp16 for 512 byte steps, forced to 1 for 256 */
	if (eccsize_mult == 1)
		code[2] |= 3;
	else
		code[2] |= (ecc_oddparity8(rp[17]) << 1) |
			   (ecc_oddparity8(rp[16]) << 0);

	return 0;
}
354 EXPORT_SYMBOL(ecc_sw_hamming_calculate
);
/**
 * nand_ecc_sw_hamming_calculate - Calculate 3-byte ECC for 256/512-byte block
 * @nand: NAND device
 * @buf: Input buffer with raw data
 * @code: Output buffer with ECC
 */
362 int nand_ecc_sw_hamming_calculate(struct nand_device
*nand
,
363 const unsigned char *buf
, unsigned char *code
)
365 struct nand_ecc_sw_hamming_conf
*engine_conf
= nand
->ecc
.ctx
.priv
;
366 unsigned int step_size
= nand
->ecc
.ctx
.conf
.step_size
;
367 bool sm_order
= engine_conf
? engine_conf
->sm_order
: false;
369 return ecc_sw_hamming_calculate(buf
, step_size
, code
, sm_order
);
371 EXPORT_SYMBOL(nand_ecc_sw_hamming_calculate
);
373 int ecc_sw_hamming_correct(unsigned char *buf
, unsigned char *read_ecc
,
374 unsigned char *calc_ecc
, unsigned int step_size
,
377 const u32 eccsize_mult
= step_size
>> 8;
378 unsigned char b0
, b1
, b2
, bit_addr
;
379 unsigned int byte_addr
;
382 * b0 to b2 indicate which bit is faulty (if any)
383 * we might need the xor result more than once,
384 * so keep them in a local var
387 b0
= read_ecc
[0] ^ calc_ecc
[0];
388 b1
= read_ecc
[1] ^ calc_ecc
[1];
390 b0
= read_ecc
[1] ^ calc_ecc
[1];
391 b1
= read_ecc
[0] ^ calc_ecc
[0];
394 b2
= read_ecc
[2] ^ calc_ecc
[2];
396 /* check if there are any bitfaults */
398 /* repeated if statements are slightly more efficient than switch ... */
399 /* ordered in order of likelihood */
401 if ((b0
| b1
| b2
) == 0)
402 return 0; /* no error */
404 if ((((b0
^ (b0
>> 1)) & 0x55) == 0x55) &&
405 (((b1
^ (b1
>> 1)) & 0x55) == 0x55) &&
406 ((eccsize_mult
== 1 && ((b2
^ (b2
>> 1)) & 0x54) == 0x54) ||
407 (eccsize_mult
== 2 && ((b2
^ (b2
>> 1)) & 0x55) == 0x55))) {
408 /* single bit error */
410 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
411 * byte, cp 5/3/1 indicate the faulty bit.
412 * A lookup table (called addressbits) is used to filter
413 * the bits from the byte they are in.
414 * A marginal optimisation is possible by having three
415 * different lookup tables.
416 * One as we have now (for b0), one for b2
417 * (that would avoid the >> 1), and one for b1 (with all values
418 * << 4). However it was felt that introducing two more tables
419 * hardly justify the gain.
421 * The b2 shift is there to get rid of the lowest two bits.
422 * We could also do addressbits[b2] >> 1 but for the
423 * performance it does not make any difference
425 if (eccsize_mult
== 1)
426 byte_addr
= (addressbits
[b1
] << 4) + addressbits
[b0
];
428 byte_addr
= (addressbits
[b2
& 0x3] << 8) +
429 (addressbits
[b1
] << 4) + addressbits
[b0
];
430 bit_addr
= addressbits
[b2
>> 2];
432 buf
[byte_addr
] ^= (1 << bit_addr
);
436 /* count nr of bits; use table lookup, faster than calculating it */
437 if ((bitsperbyte
[b0
] + bitsperbyte
[b1
] + bitsperbyte
[b2
]) == 1)
438 return 1; /* error in ECC data; no action needed */
440 pr_err("%s: uncorrectable ECC error\n", __func__
);
443 EXPORT_SYMBOL(ecc_sw_hamming_correct
);
/**
 * nand_ecc_sw_hamming_correct - Detect and correct bit error(s)
 * @nand: NAND device
 * @buf: Raw data read from the chip
 * @read_ecc: ECC bytes read from the chip
 * @calc_ecc: ECC calculated from the raw data
 *
 * Detect and correct up to 1 bit error per 256/512-byte block.
 */
454 int nand_ecc_sw_hamming_correct(struct nand_device
*nand
, unsigned char *buf
,
455 unsigned char *read_ecc
,
456 unsigned char *calc_ecc
)
458 struct nand_ecc_sw_hamming_conf
*engine_conf
= nand
->ecc
.ctx
.priv
;
459 unsigned int step_size
= nand
->ecc
.ctx
.conf
.step_size
;
460 bool sm_order
= engine_conf
? engine_conf
->sm_order
: false;
462 return ecc_sw_hamming_correct(buf
, read_ecc
, calc_ecc
, step_size
,
465 EXPORT_SYMBOL(nand_ecc_sw_hamming_correct
);
467 int nand_ecc_sw_hamming_init_ctx(struct nand_device
*nand
)
469 struct nand_ecc_props
*conf
= &nand
->ecc
.ctx
.conf
;
470 struct nand_ecc_sw_hamming_conf
*engine_conf
;
471 struct mtd_info
*mtd
= nanddev_to_mtd(nand
);
474 if (!mtd
->ooblayout
) {
475 switch (mtd
->oobsize
) {
478 mtd_set_ooblayout(mtd
, nand_get_small_page_ooblayout());
482 mtd_set_ooblayout(mtd
,
483 nand_get_large_page_hamming_ooblayout());
490 conf
->engine_type
= NAND_ECC_ENGINE_TYPE_SOFT
;
491 conf
->algo
= NAND_ECC_ALGO_HAMMING
;
492 conf
->step_size
= nand
->ecc
.user_conf
.step_size
;
495 /* Use the strongest configuration by default */
496 if (conf
->step_size
!= 256 && conf
->step_size
!= 512)
497 conf
->step_size
= 256;
499 engine_conf
= kzalloc(sizeof(*engine_conf
), GFP_KERNEL
);
503 ret
= nand_ecc_init_req_tweaking(&engine_conf
->req_ctx
, nand
);
505 goto free_engine_conf
;
507 engine_conf
->code_size
= 3;
508 engine_conf
->calc_buf
= kzalloc(mtd
->oobsize
, GFP_KERNEL
);
509 engine_conf
->code_buf
= kzalloc(mtd
->oobsize
, GFP_KERNEL
);
510 if (!engine_conf
->calc_buf
|| !engine_conf
->code_buf
) {
515 nand
->ecc
.ctx
.priv
= engine_conf
;
516 nand
->ecc
.ctx
.nsteps
= mtd
->writesize
/ conf
->step_size
;
517 nand
->ecc
.ctx
.total
= nand
->ecc
.ctx
.nsteps
* engine_conf
->code_size
;
522 nand_ecc_cleanup_req_tweaking(&engine_conf
->req_ctx
);
523 kfree(engine_conf
->calc_buf
);
524 kfree(engine_conf
->code_buf
);
530 EXPORT_SYMBOL(nand_ecc_sw_hamming_init_ctx
);
532 void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device
*nand
)
534 struct nand_ecc_sw_hamming_conf
*engine_conf
= nand
->ecc
.ctx
.priv
;
537 nand_ecc_cleanup_req_tweaking(&engine_conf
->req_ctx
);
538 kfree(engine_conf
->calc_buf
);
539 kfree(engine_conf
->code_buf
);
543 EXPORT_SYMBOL(nand_ecc_sw_hamming_cleanup_ctx
);
545 static int nand_ecc_sw_hamming_prepare_io_req(struct nand_device
*nand
,
546 struct nand_page_io_req
*req
)
548 struct nand_ecc_sw_hamming_conf
*engine_conf
= nand
->ecc
.ctx
.priv
;
549 struct mtd_info
*mtd
= nanddev_to_mtd(nand
);
550 int eccsize
= nand
->ecc
.ctx
.conf
.step_size
;
551 int eccbytes
= engine_conf
->code_size
;
552 int eccsteps
= nand
->ecc
.ctx
.nsteps
;
553 int total
= nand
->ecc
.ctx
.total
;
554 u8
*ecccalc
= engine_conf
->calc_buf
;
558 /* Nothing to do for a raw operation */
559 if (req
->mode
== MTD_OPS_RAW
)
562 /* This engine does not provide BBM/free OOB bytes protection */
566 nand_ecc_tweak_req(&engine_conf
->req_ctx
, req
);
568 /* No more preparation for page read */
569 if (req
->type
== NAND_PAGE_READ
)
572 /* Preparation for page write: derive the ECC bytes and place them */
573 for (i
= 0, data
= req
->databuf
.out
;
575 eccsteps
--, i
+= eccbytes
, data
+= eccsize
)
576 nand_ecc_sw_hamming_calculate(nand
, data
, &ecccalc
[i
]);
578 return mtd_ooblayout_set_eccbytes(mtd
, ecccalc
, (void *)req
->oobbuf
.out
,
582 static int nand_ecc_sw_hamming_finish_io_req(struct nand_device
*nand
,
583 struct nand_page_io_req
*req
)
585 struct nand_ecc_sw_hamming_conf
*engine_conf
= nand
->ecc
.ctx
.priv
;
586 struct mtd_info
*mtd
= nanddev_to_mtd(nand
);
587 int eccsize
= nand
->ecc
.ctx
.conf
.step_size
;
588 int total
= nand
->ecc
.ctx
.total
;
589 int eccbytes
= engine_conf
->code_size
;
590 int eccsteps
= nand
->ecc
.ctx
.nsteps
;
591 u8
*ecccalc
= engine_conf
->calc_buf
;
592 u8
*ecccode
= engine_conf
->code_buf
;
593 unsigned int max_bitflips
= 0;
594 u8
*data
= req
->databuf
.in
;
597 /* Nothing to do for a raw operation */
598 if (req
->mode
== MTD_OPS_RAW
)
601 /* This engine does not provide BBM/free OOB bytes protection */
605 /* No more preparation for page write */
606 if (req
->type
== NAND_PAGE_WRITE
) {
607 nand_ecc_restore_req(&engine_conf
->req_ctx
, req
);
611 /* Finish a page read: retrieve the (raw) ECC bytes*/
612 ret
= mtd_ooblayout_get_eccbytes(mtd
, ecccode
, req
->oobbuf
.in
, 0,
617 /* Calculate the ECC bytes */
618 for (i
= 0; eccsteps
; eccsteps
--, i
+= eccbytes
, data
+= eccsize
)
619 nand_ecc_sw_hamming_calculate(nand
, data
, &ecccalc
[i
]);
621 /* Finish a page read: compare and correct */
622 for (eccsteps
= nand
->ecc
.ctx
.nsteps
, i
= 0, data
= req
->databuf
.in
;
624 eccsteps
--, i
+= eccbytes
, data
+= eccsize
) {
625 int stat
= nand_ecc_sw_hamming_correct(nand
, data
,
629 mtd
->ecc_stats
.failed
++;
631 mtd
->ecc_stats
.corrected
+= stat
;
632 max_bitflips
= max_t(unsigned int, max_bitflips
, stat
);
636 nand_ecc_restore_req(&engine_conf
->req_ctx
, req
);
641 static const struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops
= {
642 .init_ctx
= nand_ecc_sw_hamming_init_ctx
,
643 .cleanup_ctx
= nand_ecc_sw_hamming_cleanup_ctx
,
644 .prepare_io_req
= nand_ecc_sw_hamming_prepare_io_req
,
645 .finish_io_req
= nand_ecc_sw_hamming_finish_io_req
,
648 static struct nand_ecc_engine nand_ecc_sw_hamming_engine
= {
649 .ops
= &nand_ecc_sw_hamming_engine_ops
,
652 struct nand_ecc_engine
*nand_ecc_sw_hamming_get_engine(void)
654 return &nand_ecc_sw_hamming_engine
;
656 EXPORT_SYMBOL(nand_ecc_sw_hamming_get_engine
);
658 MODULE_LICENSE("GPL");
659 MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
660 MODULE_DESCRIPTION("NAND software Hamming ECC support");