/* Source: Linux 4.19.133, drivers/mtd/nand/raw/nand_ecc.c */
1 /*
2 * This file contains an ECC algorithm that detects and corrects 1 bit
3 * errors in a 256 byte block of data.
5 * Copyright © 2008 Koninklijke Philips Electronics NV.
6 * Author: Frans Meulenbroeks
8 * Completely replaces the previous ECC implementation which was written by:
9 * Steven J. Hill (sjhill@realitydiluted.com)
10 * Thomas Gleixner (tglx@linutronix.de)
12 * Information on how this algorithm works and how it was developed
13 * can be found in Documentation/mtd/nand_ecc.txt
15 * This file is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 or (at your option) any
18 * later version.
20 * This file is distributed in the hope that it will be useful, but WITHOUT
21 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
23 * for more details.
25 * You should have received a copy of the GNU General Public License along
26 * with this file; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */
31 #include <linux/types.h>
32 #include <linux/kernel.h>
33 #include <linux/module.h>
34 #include <linux/mtd/mtd.h>
35 #include <linux/mtd/rawnand.h>
36 #include <linux/mtd/nand_ecc.h>
37 #include <asm/byteorder.h>
40 * invparity is a 256 byte table that contains the odd parity
41 * for each byte. So if the number of bits in a byte is even,
42 * the array element is 1, and when the number of bits is odd
43 * the array eleemnt is 0.
45 static const char invparity[256] = {
46 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
47 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
48 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
49 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
50 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
51 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
52 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
53 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
54 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
55 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
56 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
57 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
58 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
59 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
60 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
61 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
/*
 * bitsperbyte contains the number of bits per byte
 * this is only used for testing and repairing parity
 * (a precalculated value slightly improves performance)
 */
static const char bitsperbyte[256] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
/*
 * addressbits is a lookup table to filter out the bits from the xor-ed
 * ECC data that identify the faulty location.
 * this is only used for repairing parity
 * see the comments in nand_correct_data for more details
 *
 * (addressbits[i] packs bits 1, 3, 5 and 7 of i into bits 0..3)
 */
static const char addressbits[256] = {
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};
130 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
131 * block
132 * @buf: input buffer with raw data
133 * @eccsize: data bytes per ECC step (256 or 512)
134 * @code: output buffer with ECC
136 void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
137 unsigned char *code)
139 int i;
140 const uint32_t *bp = (uint32_t *)buf;
141 /* 256 or 512 bytes/ecc */
142 const uint32_t eccsize_mult = eccsize >> 8;
143 uint32_t cur; /* current value in buffer */
144 /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
145 uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
146 uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
147 uint32_t uninitialized_var(rp17); /* to make compiler happy */
148 uint32_t par; /* the cumulative parity for all data */
149 uint32_t tmppar; /* the cumulative parity for this iteration;
150 for rp12, rp14 and rp16 at the end of the
151 loop */
153 par = 0;
154 rp4 = 0;
155 rp6 = 0;
156 rp8 = 0;
157 rp10 = 0;
158 rp12 = 0;
159 rp14 = 0;
160 rp16 = 0;
163 * The loop is unrolled a number of times;
164 * This avoids if statements to decide on which rp value to update
165 * Also we process the data by longwords.
166 * Note: passing unaligned data might give a performance penalty.
167 * It is assumed that the buffers are aligned.
168 * tmppar is the cumulative sum of this iteration.
169 * needed for calculating rp12, rp14, rp16 and par
170 * also used as a performance improvement for rp6, rp8 and rp10
172 for (i = 0; i < eccsize_mult << 2; i++) {
173 cur = *bp++;
174 tmppar = cur;
175 rp4 ^= cur;
176 cur = *bp++;
177 tmppar ^= cur;
178 rp6 ^= tmppar;
179 cur = *bp++;
180 tmppar ^= cur;
181 rp4 ^= cur;
182 cur = *bp++;
183 tmppar ^= cur;
184 rp8 ^= tmppar;
186 cur = *bp++;
187 tmppar ^= cur;
188 rp4 ^= cur;
189 rp6 ^= cur;
190 cur = *bp++;
191 tmppar ^= cur;
192 rp6 ^= cur;
193 cur = *bp++;
194 tmppar ^= cur;
195 rp4 ^= cur;
196 cur = *bp++;
197 tmppar ^= cur;
198 rp10 ^= tmppar;
200 cur = *bp++;
201 tmppar ^= cur;
202 rp4 ^= cur;
203 rp6 ^= cur;
204 rp8 ^= cur;
205 cur = *bp++;
206 tmppar ^= cur;
207 rp6 ^= cur;
208 rp8 ^= cur;
209 cur = *bp++;
210 tmppar ^= cur;
211 rp4 ^= cur;
212 rp8 ^= cur;
213 cur = *bp++;
214 tmppar ^= cur;
215 rp8 ^= cur;
217 cur = *bp++;
218 tmppar ^= cur;
219 rp4 ^= cur;
220 rp6 ^= cur;
221 cur = *bp++;
222 tmppar ^= cur;
223 rp6 ^= cur;
224 cur = *bp++;
225 tmppar ^= cur;
226 rp4 ^= cur;
227 cur = *bp++;
228 tmppar ^= cur;
230 par ^= tmppar;
231 if ((i & 0x1) == 0)
232 rp12 ^= tmppar;
233 if ((i & 0x2) == 0)
234 rp14 ^= tmppar;
235 if (eccsize_mult == 2 && (i & 0x4) == 0)
236 rp16 ^= tmppar;
240 * handle the fact that we use longword operations
241 * we'll bring rp4..rp14..rp16 back to single byte entities by
242 * shifting and xoring first fold the upper and lower 16 bits,
243 * then the upper and lower 8 bits.
245 rp4 ^= (rp4 >> 16);
246 rp4 ^= (rp4 >> 8);
247 rp4 &= 0xff;
248 rp6 ^= (rp6 >> 16);
249 rp6 ^= (rp6 >> 8);
250 rp6 &= 0xff;
251 rp8 ^= (rp8 >> 16);
252 rp8 ^= (rp8 >> 8);
253 rp8 &= 0xff;
254 rp10 ^= (rp10 >> 16);
255 rp10 ^= (rp10 >> 8);
256 rp10 &= 0xff;
257 rp12 ^= (rp12 >> 16);
258 rp12 ^= (rp12 >> 8);
259 rp12 &= 0xff;
260 rp14 ^= (rp14 >> 16);
261 rp14 ^= (rp14 >> 8);
262 rp14 &= 0xff;
263 if (eccsize_mult == 2) {
264 rp16 ^= (rp16 >> 16);
265 rp16 ^= (rp16 >> 8);
266 rp16 &= 0xff;
270 * we also need to calculate the row parity for rp0..rp3
271 * This is present in par, because par is now
272 * rp3 rp3 rp2 rp2 in little endian and
273 * rp2 rp2 rp3 rp3 in big endian
274 * as well as
275 * rp1 rp0 rp1 rp0 in little endian and
276 * rp0 rp1 rp0 rp1 in big endian
277 * First calculate rp2 and rp3
279 #ifdef __BIG_ENDIAN
280 rp2 = (par >> 16);
281 rp2 ^= (rp2 >> 8);
282 rp2 &= 0xff;
283 rp3 = par & 0xffff;
284 rp3 ^= (rp3 >> 8);
285 rp3 &= 0xff;
286 #else
287 rp3 = (par >> 16);
288 rp3 ^= (rp3 >> 8);
289 rp3 &= 0xff;
290 rp2 = par & 0xffff;
291 rp2 ^= (rp2 >> 8);
292 rp2 &= 0xff;
293 #endif
295 /* reduce par to 16 bits then calculate rp1 and rp0 */
296 par ^= (par >> 16);
297 #ifdef __BIG_ENDIAN
298 rp0 = (par >> 8) & 0xff;
299 rp1 = (par & 0xff);
300 #else
301 rp1 = (par >> 8) & 0xff;
302 rp0 = (par & 0xff);
303 #endif
305 /* finally reduce par to 8 bits */
306 par ^= (par >> 8);
307 par &= 0xff;
310 * and calculate rp5..rp15..rp17
311 * note that par = rp4 ^ rp5 and due to the commutative property
312 * of the ^ operator we can say:
313 * rp5 = (par ^ rp4);
314 * The & 0xff seems superfluous, but benchmarking learned that
315 * leaving it out gives slightly worse results. No idea why, probably
316 * it has to do with the way the pipeline in pentium is organized.
318 rp5 = (par ^ rp4) & 0xff;
319 rp7 = (par ^ rp6) & 0xff;
320 rp9 = (par ^ rp8) & 0xff;
321 rp11 = (par ^ rp10) & 0xff;
322 rp13 = (par ^ rp12) & 0xff;
323 rp15 = (par ^ rp14) & 0xff;
324 if (eccsize_mult == 2)
325 rp17 = (par ^ rp16) & 0xff;
328 * Finally calculate the ECC bits.
329 * Again here it might seem that there are performance optimisations
330 * possible, but benchmarks showed that on the system this is developed
331 * the code below is the fastest
333 #ifdef CONFIG_MTD_NAND_ECC_SMC
334 code[0] =
335 (invparity[rp7] << 7) |
336 (invparity[rp6] << 6) |
337 (invparity[rp5] << 5) |
338 (invparity[rp4] << 4) |
339 (invparity[rp3] << 3) |
340 (invparity[rp2] << 2) |
341 (invparity[rp1] << 1) |
342 (invparity[rp0]);
343 code[1] =
344 (invparity[rp15] << 7) |
345 (invparity[rp14] << 6) |
346 (invparity[rp13] << 5) |
347 (invparity[rp12] << 4) |
348 (invparity[rp11] << 3) |
349 (invparity[rp10] << 2) |
350 (invparity[rp9] << 1) |
351 (invparity[rp8]);
352 #else
353 code[1] =
354 (invparity[rp7] << 7) |
355 (invparity[rp6] << 6) |
356 (invparity[rp5] << 5) |
357 (invparity[rp4] << 4) |
358 (invparity[rp3] << 3) |
359 (invparity[rp2] << 2) |
360 (invparity[rp1] << 1) |
361 (invparity[rp0]);
362 code[0] =
363 (invparity[rp15] << 7) |
364 (invparity[rp14] << 6) |
365 (invparity[rp13] << 5) |
366 (invparity[rp12] << 4) |
367 (invparity[rp11] << 3) |
368 (invparity[rp10] << 2) |
369 (invparity[rp9] << 1) |
370 (invparity[rp8]);
371 #endif
372 if (eccsize_mult == 1)
373 code[2] =
374 (invparity[par & 0xf0] << 7) |
375 (invparity[par & 0x0f] << 6) |
376 (invparity[par & 0xcc] << 5) |
377 (invparity[par & 0x33] << 4) |
378 (invparity[par & 0xaa] << 3) |
379 (invparity[par & 0x55] << 2) |
381 else
382 code[2] =
383 (invparity[par & 0xf0] << 7) |
384 (invparity[par & 0x0f] << 6) |
385 (invparity[par & 0xcc] << 5) |
386 (invparity[par & 0x33] << 4) |
387 (invparity[par & 0xaa] << 3) |
388 (invparity[par & 0x55] << 2) |
389 (invparity[rp17] << 1) |
390 (invparity[rp16] << 0);
392 EXPORT_SYMBOL(__nand_calculate_ecc);
395 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
396 * block
397 * @mtd: MTD block structure
398 * @buf: input buffer with raw data
399 * @code: output buffer with ECC
401 int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
402 unsigned char *code)
404 __nand_calculate_ecc(buf,
405 mtd_to_nand(mtd)->ecc.size, code);
407 return 0;
409 EXPORT_SYMBOL(nand_calculate_ecc);
412 * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
413 * @buf: raw data read from the chip
414 * @read_ecc: ECC from the chip
415 * @calc_ecc: the ECC calculated from raw data
416 * @eccsize: data bytes per ECC step (256 or 512)
418 * Detect and correct a 1 bit error for eccsize byte block
420 int __nand_correct_data(unsigned char *buf,
421 unsigned char *read_ecc, unsigned char *calc_ecc,
422 unsigned int eccsize)
424 unsigned char b0, b1, b2, bit_addr;
425 unsigned int byte_addr;
426 /* 256 or 512 bytes/ecc */
427 const uint32_t eccsize_mult = eccsize >> 8;
430 * b0 to b2 indicate which bit is faulty (if any)
431 * we might need the xor result more than once,
432 * so keep them in a local var
434 #ifdef CONFIG_MTD_NAND_ECC_SMC
435 b0 = read_ecc[0] ^ calc_ecc[0];
436 b1 = read_ecc[1] ^ calc_ecc[1];
437 #else
438 b0 = read_ecc[1] ^ calc_ecc[1];
439 b1 = read_ecc[0] ^ calc_ecc[0];
440 #endif
441 b2 = read_ecc[2] ^ calc_ecc[2];
443 /* check if there are any bitfaults */
445 /* repeated if statements are slightly more efficient than switch ... */
446 /* ordered in order of likelihood */
448 if ((b0 | b1 | b2) == 0)
449 return 0; /* no error */
451 if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
452 (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
453 ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
454 (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
455 /* single bit error */
457 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
458 * byte, cp 5/3/1 indicate the faulty bit.
459 * A lookup table (called addressbits) is used to filter
460 * the bits from the byte they are in.
461 * A marginal optimisation is possible by having three
462 * different lookup tables.
463 * One as we have now (for b0), one for b2
464 * (that would avoid the >> 1), and one for b1 (with all values
465 * << 4). However it was felt that introducing two more tables
466 * hardly justify the gain.
468 * The b2 shift is there to get rid of the lowest two bits.
469 * We could also do addressbits[b2] >> 1 but for the
470 * performance it does not make any difference
472 if (eccsize_mult == 1)
473 byte_addr = (addressbits[b1] << 4) + addressbits[b0];
474 else
475 byte_addr = (addressbits[b2 & 0x3] << 8) +
476 (addressbits[b1] << 4) + addressbits[b0];
477 bit_addr = addressbits[b2 >> 2];
478 /* flip the bit */
479 buf[byte_addr] ^= (1 << bit_addr);
480 return 1;
483 /* count nr of bits; use table lookup, faster than calculating it */
484 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
485 return 1; /* error in ECC data; no action needed */
487 pr_err("%s: uncorrectable ECC error\n", __func__);
488 return -EBADMSG;
490 EXPORT_SYMBOL(__nand_correct_data);
493 * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
494 * @mtd: MTD block structure
495 * @buf: raw data read from the chip
496 * @read_ecc: ECC from the chip
497 * @calc_ecc: the ECC calculated from raw data
499 * Detect and correct a 1 bit error for 256/512 byte block
501 int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
502 unsigned char *read_ecc, unsigned char *calc_ecc)
504 return __nand_correct_data(buf, read_ecc, calc_ecc,
505 mtd_to_nand(mtd)->ecc.size);
507 EXPORT_SYMBOL(nand_correct_data);
/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
MODULE_DESCRIPTION("Generic NAND ECC support");