/*
 * This file contains an ECC algorithm that detects and corrects 1 bit
 * errors in a 256 byte block of data.
 *
 * drivers/mtd/nand/nand_ecc.c
 *
 * Copyright © 2008 Koninklijke Philips Electronics NV.
 * Author: Frans Meulenbroeks
 *
 * Completely replaces the previous ECC implementation which was written by:
 *   Steven J. Hill (sjhill@realitydiluted.com)
 *   Thomas Gleixner (tglx@linutronix.de)
 *
 * Information on how this algorithm works and how it was developed
 * can be found in Documentation/mtd/nand_ecc.txt
 *
 * This file is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this file; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */
/*
 * The STANDALONE macro is useful when running the code outside the kernel,
 * e.g. when running the code in a testbed or a benchmark program.
 * When STANDALONE is used, the module related macros are commented out
 * as well as the linux include files.
 * Instead a private definition of mtd_info is given to satisfy the compiler
 * (the code does not use mtd_info, so the code does not care).
 */
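/*
 * Illustrative note, not from the original source: a standalone build can be
 * done with something along the lines of
 *	cc -DSTANDALONE -c nand_ecc.c
 * together with a small test or benchmark driver that supplies the buffers.
 */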
#ifndef STANDALONE
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/byteorder.h>
#else
#include <stdint.h>
struct mtd_info;
#define EXPORT_SYMBOL(x)	/* x */

#define MODULE_LICENSE(x)	/* x */
#define MODULE_AUTHOR(x)	/* x */
#define MODULE_DESCRIPTION(x)	/* x */

#define printk printf
#define KERN_ERR		""
#endif
/*
 * invparity is a 256 byte table that contains the odd parity
 * for each byte. So if the number of bits in a byte is even,
 * the array element is 1, and when the number of bits is odd
 * the array element is 0.
 * (an illustrative way to generate this table is sketched below it)
 */
static const char invparity[256] = {
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
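/*
 * Illustrative sketch, not part of the driver: the invparity table above can
 * be generated at runtime as shown below. gen_invparity() is a hypothetical
 * helper name used only for this example; the #if 0 keeps it out of the build.
 */
#if 0
static void gen_invparity(char table[256])
{
	int i, v, bits;

	for (i = 0; i < 256; i++) {
		/* count the set bits of i */
		bits = 0;
		for (v = i; v != 0; v >>= 1)
			bits += v & 1;
		/* 1 for an even number of set bits, 0 for an odd number */
		table[i] = (bits & 1) ? 0 : 1;
	}
}
#endif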
/*
 * bitsperbyte contains the number of bits per byte
 * this is only used for testing and repairing parity
 * (a precalculated value slightly improves performance)
 * (an illustrative way to generate this table is sketched below it)
 */
static const char bitsperbyte[256] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
};
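/*
 * Illustrative sketch, not part of the driver: bitsperbyte[i] is simply the
 * population count of i, so the table above can be generated as shown below.
 * gen_bitsperbyte() is a hypothetical helper name used only for this example;
 * the #if 0 keeps it out of the build.
 */
#if 0
static void gen_bitsperbyte(char table[256])
{
	int i, v;

	for (i = 0; i < 256; i++) {
		table[i] = 0;
		/* clear the lowest set bit until none are left */
		for (v = i; v != 0; v &= v - 1)
			table[i]++;
	}
}
#endif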
/*
 * addressbits is a lookup table to filter out the bits from the xor-ed
 * ecc data that identify the faulty location.
 * this is only used for repairing parity
 * see the comments in nand_correct_data for more details
 * (an illustrative equivalent computation is sketched below the table)
 */
static const char addressbits[256] = {
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};
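/*
 * Illustrative sketch, not part of the driver: addressbits[b] packs the
 * odd-numbered bits of b (bits 1, 3, 5 and 7) into bits 0..3 of the result,
 * which is what the table above encodes. gen_addressbits() is a hypothetical
 * helper name used only for this example; the #if 0 keeps it out of the build.
 */
#if 0
static unsigned char gen_addressbits(unsigned char b)
{
	return ((b >> 1) & 0x01) |	/* bit 1 -> bit 0 */
	       ((b >> 2) & 0x02) |	/* bit 3 -> bit 1 */
	       ((b >> 3) & 0x04) |	/* bit 5 -> bit 2 */
	       ((b >> 4) & 0x08);	/* bit 7 -> bit 3 */
}
#endif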
/**
 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte block
 * @mtd:	MTD block structure
 * @buf:	input buffer with raw data
 * @code:	output buffer with ECC
 * (an illustrative usage sketch follows the function body)
 */
int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
		       unsigned char *code)
{
	int i;
	const uint32_t *bp = (uint32_t *)buf;
	/* 256 or 512 bytes/ecc */
	const uint32_t eccsize_mult =
			(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
	uint32_t cur;		/* current value in buffer */
	/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
	uint32_t uninitialized_var(rp17);	/* to make compiler happy */
	uint32_t par;		/* the cumulative parity for all data */
	uint32_t tmppar;	/* the cumulative parity for this iteration;
				   for rp12, rp14 and rp16 at the end of the
				   loop */
	/*
	 * The loop is unrolled a number of times; this avoids if statements
	 * to decide on which rp value to update.
	 * Also we process the data by longwords.
	 * Note: passing unaligned data might give a performance penalty.
	 * It is assumed that the buffers are aligned.
	 * tmppar is the cumulative sum of this iteration.
	 * needed for calculating rp12, rp14, rp16 and par
	 * also used as a performance improvement for rp6, rp8 and rp10
	 */
	for (i = 0; i < eccsize_mult << 2; i++) {
		if (eccsize_mult == 2 && (i & 0x4) == 0)
	/*
	 * handle the fact that we use longword operations
	 * we'll bring rp4..rp14..rp16 back to single byte entities by
	 * shifting and xoring: first fold the upper and lower 16 bits,
	 * then the upper and lower 8 bits.
	 */
	rp10 ^= (rp10 >> 16);
	rp12 ^= (rp12 >> 16);
	rp14 ^= (rp14 >> 16);
	if (eccsize_mult == 2) {
		rp16 ^= (rp16 >> 16);
	/*
	 * we also need to calculate the row parity for rp0..rp3
	 * This is present in par, because par is now
	 * rp3 rp3 rp2 rp2 in little endian and
	 * rp2 rp2 rp3 rp3 in big endian
	 * as well as
	 * rp1 rp0 rp1 rp0 in little endian and
	 * rp0 rp1 rp0 rp1 in big endian
	 * First calculate rp2 and rp3
	 */
	/* reduce par to 16 bits then calculate rp1 and rp0 */
	par ^= (par >> 16);
#ifdef __BIG_ENDIAN
	rp0 = (par >> 8) & 0xff;
	rp1 = (par & 0xff);
#else
	rp1 = (par >> 8) & 0xff;
	rp0 = (par & 0xff);
#endif

	/* finally reduce par to 8 bits */
	par ^= (par >> 8);
	par &= 0xff;
	/*
	 * and calculate rp5..rp15..rp17
	 * note that par = rp4 ^ rp5 and due to the commutative property
	 * of the ^ operator we can say:
	 * rp5 = (par ^ rp4);
	 * The & 0xff seems superfluous, but benchmarking showed that
	 * leaving it out gives slightly worse results. No idea why; it
	 * probably has to do with the way the Pentium pipeline is organized.
	 */
	rp5 = (par ^ rp4) & 0xff;
	rp7 = (par ^ rp6) & 0xff;
	rp9 = (par ^ rp8) & 0xff;
	rp11 = (par ^ rp10) & 0xff;
	rp13 = (par ^ rp12) & 0xff;
	rp15 = (par ^ rp14) & 0xff;
	if (eccsize_mult == 2)
		rp17 = (par ^ rp16) & 0xff;
	/*
	 * Finally calculate the ecc bits.
	 * Again here it might seem that there are performance optimisations
	 * possible, but benchmarks showed that on the system this was
	 * developed on, the code below is the fastest.
	 */
#ifdef CONFIG_MTD_NAND_ECC_SMC
	code[0] =
	    (invparity[rp7] << 7) |
	    (invparity[rp6] << 6) |
	    (invparity[rp5] << 5) |
	    (invparity[rp4] << 4) |
	    (invparity[rp3] << 3) |
	    (invparity[rp2] << 2) |
	    (invparity[rp1] << 1) |
	    (invparity[rp0]);
	code[1] =
	    (invparity[rp15] << 7) |
	    (invparity[rp14] << 6) |
	    (invparity[rp13] << 5) |
	    (invparity[rp12] << 4) |
	    (invparity[rp11] << 3) |
	    (invparity[rp10] << 2) |
	    (invparity[rp9] << 1) |
	    (invparity[rp8]);
#else
	code[1] =
	    (invparity[rp7] << 7) |
	    (invparity[rp6] << 6) |
	    (invparity[rp5] << 5) |
	    (invparity[rp4] << 4) |
	    (invparity[rp3] << 3) |
	    (invparity[rp2] << 2) |
	    (invparity[rp1] << 1) |
	    (invparity[rp0]);
	code[0] =
	    (invparity[rp15] << 7) |
	    (invparity[rp14] << 6) |
	    (invparity[rp13] << 5) |
	    (invparity[rp12] << 4) |
	    (invparity[rp11] << 3) |
	    (invparity[rp10] << 2) |
	    (invparity[rp9] << 1) |
	    (invparity[rp8]);
#endif
	if (eccsize_mult == 1)
		code[2] =
		    (invparity[par & 0xf0] << 7) |
		    (invparity[par & 0x0f] << 6) |
		    (invparity[par & 0xcc] << 5) |
		    (invparity[par & 0x33] << 4) |
		    (invparity[par & 0xaa] << 3) |
		    (invparity[par & 0x55] << 2) |
		    3;
	else
		code[2] =
		    (invparity[par & 0xf0] << 7) |
		    (invparity[par & 0x0f] << 6) |
		    (invparity[par & 0xcc] << 5) |
		    (invparity[par & 0x33] << 4) |
		    (invparity[par & 0xaa] << 3) |
		    (invparity[par & 0x55] << 2) |
		    (invparity[rp17] << 1) |
		    (invparity[rp16] << 0);
	return 0;
}
EXPORT_SYMBOL(nand_calculate_ecc);
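#if 0
/*
 * Illustrative usage sketch, not part of the driver: example_write_step() and
 * its parameters are hypothetical names showing how a caller might use
 * nand_calculate_ecc() for one 256/512 byte data step; the #if 0 keeps the
 * sketch out of the build.
 */
static void example_write_step(struct mtd_info *mtd, const unsigned char *data,
			       unsigned char *oob)
{
	unsigned char ecc_code[3];
	int i;

	/* compute the 3 ECC bytes covering this data step */
	nand_calculate_ecc(mtd, data, ecc_code);

	/* store them in the out-of-band area alongside the data */
	for (i = 0; i < 3; i++)
		oob[i] = ecc_code[i];
}
#endif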
/**
 * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
 * @mtd:	MTD block structure
 * @buf:	raw data read from the chip
 * @read_ecc:	ECC from the chip
 * @calc_ecc:	the ECC calculated from raw data
 *
 * Detect and correct a 1 bit error for 256/512 byte block
 * (an illustrative usage sketch follows the function body)
 */
int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
		      unsigned char *read_ecc, unsigned char *calc_ecc)
{
	unsigned char b0, b1, b2, bit_addr;
	unsigned int byte_addr;
	/* 256 or 512 bytes/ecc */
	const uint32_t eccsize_mult =
			(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
	/*
	 * b0 to b2 indicate which bit is faulty (if any)
	 * we might need the xor result more than once,
	 * so keep them in a local variable
	 */
#ifdef CONFIG_MTD_NAND_ECC_SMC
	b0 = read_ecc[0] ^ calc_ecc[0];
	b1 = read_ecc[1] ^ calc_ecc[1];
#else
	b0 = read_ecc[1] ^ calc_ecc[1];
	b1 = read_ecc[0] ^ calc_ecc[0];
#endif
	b2 = read_ecc[2] ^ calc_ecc[2];
	/* check if there are any bitfaults */

	/* repeated if statements are slightly more efficient than switch ... */
	/* ordered by likelihood */

	if ((b0 | b1 | b2) == 0)
		return 0;	/* no error */
	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
	    ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
	     (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
		/* single bit error */
		/*
		 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
		 * byte, cp 5/3/1 indicate the faulty bit.
		 * A lookup table (called addressbits) is used to filter
		 * the bits from the byte they are in.
		 * A marginal optimisation is possible by having three
		 * different lookup tables.
		 * One as we have now (for b0), one for b2
		 * (that would avoid the >> 1), and one for b1 (with all values
		 * << 4). However it was felt that introducing two more tables
		 * hardly justifies the gain.
		 *
		 * The b2 shift is there to get rid of the lowest two bits.
		 * We could also do addressbits[b2] >> 1 but for the
		 * performance it does not make any difference.
		 */
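		/*
		 * Worked example (illustrative, not from the original source):
		 * if the faulty byte is at address 0xa7 in a 256 byte block,
		 * then b0 = 0x6a and b1 = 0x99, so
		 *   addressbits[b0] = 0x07 (address bits 3..0) and
		 *   addressbits[b1] = 0x0a (address bits 7..4),
		 * giving byte_addr = (0x0a << 4) + 0x07 = 0xa7.
		 */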
		if (eccsize_mult == 1)
			byte_addr = (addressbits[b1] << 4) + addressbits[b0];
		else
			byte_addr = (addressbits[b2 & 0x3] << 8) +
				    (addressbits[b1] << 4) + addressbits[b0];
		bit_addr = addressbits[b2 >> 2];
		/* flip the bit */
		buf[byte_addr] ^= (1 << bit_addr);
		return 1;
	}
	/* count nr of bits; use table lookup, faster than calculating it */
	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
		return 1;	/* error in ecc data; no action needed */
	printk(KERN_ERR "uncorrectable error : ");
	return -1;
}
EXPORT_SYMBOL(nand_correct_data);
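#if 0
/*
 * Illustrative usage sketch, not part of the driver: example_read_step() is a
 * hypothetical name showing the usual read path: recompute the ECC over the
 * data just read and let nand_correct_data() compare it against the ECC that
 * was stored when the page was written; the #if 0 keeps the sketch out of the
 * build.
 */
static int example_read_step(struct mtd_info *mtd, unsigned char *data,
			     unsigned char *stored_ecc)
{
	unsigned char calc_ecc[3];

	nand_calculate_ecc(mtd, data, calc_ecc);

	/* 0: no error, 1: corrected or error in the ECC bytes, <0: uncorrectable */
	return nand_correct_data(mtd, data, stored_ecc, calc_ecc);
}
#endif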
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
MODULE_DESCRIPTION("Generic NAND ECC support");