drivers/mtd/nand/nand_base.c
1 /*
2 * Overview:
3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
9 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
12 * Credits:
13 * David Woodhouse for adding multichip support
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
18 * TODO:
19 * Enable cached programming for 2k page size chips
20 * Check, if mtd->ecctype should be set to MTD_ECC_HW
21 * if we have HW ECC support.
22 * BBT table is not serialized, has to be fixed
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/nmi.h>
40 #include <linux/types.h>
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/rawnand.h>
43 #include <linux/mtd/nand_ecc.h>
44 #include <linux/mtd/nand_bch.h>
45 #include <linux/interrupt.h>
46 #include <linux/bitops.h>
47 #include <linux/io.h>
48 #include <linux/mtd/partitions.h>
49 #include <linux/of.h>
51 static int nand_get_device(struct mtd_info *mtd, int new_state);
53 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
54 struct mtd_oob_ops *ops);
56 /* Define default oob placement schemes for large and small page devices */
57 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
60 struct nand_chip *chip = mtd_to_nand(mtd);
61 struct nand_ecc_ctrl *ecc = &chip->ecc;
63 if (section > 1)
64 return -ERANGE;
66 if (!section) {
67 oobregion->offset = 0;
68 if (mtd->oobsize == 16)
69 oobregion->length = 4;
70 else
71 oobregion->length = 3;
72 } else {
73 if (mtd->oobsize == 8)
74 return -ERANGE;
76 oobregion->offset = 6;
77 oobregion->length = ecc->total - 4;
80 return 0;
83 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
84 struct mtd_oob_region *oobregion)
86 if (section > 1)
87 return -ERANGE;
89 if (mtd->oobsize == 16) {
90 if (section)
91 return -ERANGE;
93 oobregion->length = 8;
94 oobregion->offset = 8;
95 } else {
96 oobregion->length = 2;
97 if (!section)
98 oobregion->offset = 3;
99 else
100 oobregion->offset = 6;
103 return 0;
106 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
107 .ecc = nand_ooblayout_ecc_sp,
108 .free = nand_ooblayout_free_sp,
110 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
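/*
 * Illustrative sketch (not part of the driver): how a caller could walk the
 * ECC/free regions exposed by an OOB layout such as nand_ooblayout_sp_ops,
 * using the generic mtd_ooblayout_ecc()/mtd_ooblayout_free() accessors from
 * <linux/mtd/mtd.h>. The function name and pr_info() output format are
 * invented for the example.
 */
static void __maybe_unused example_dump_oob_layout(struct mtd_info *mtd)
{
        struct mtd_oob_region region;
        int section;

        /* Walk ECC regions until the layout callback reports -ERANGE. */
        for (section = 0; !mtd_ooblayout_ecc(mtd, section, &region); section++)
                pr_info("ECC section %d: offset %u, length %u\n",
                        section, region.offset, region.length);

        /* Same iteration for the free (user-available) OOB bytes. */
        for (section = 0; !mtd_ooblayout_free(mtd, section, &region); section++)
                pr_info("free section %d: offset %u, length %u\n",
                        section, region.offset, region.length);
}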
112 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
113 struct mtd_oob_region *oobregion)
115 struct nand_chip *chip = mtd_to_nand(mtd);
116 struct nand_ecc_ctrl *ecc = &chip->ecc;
118 if (section || !ecc->total)
119 return -ERANGE;
121 oobregion->length = ecc->total;
122 oobregion->offset = mtd->oobsize - oobregion->length;
124 return 0;
127 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
128 struct mtd_oob_region *oobregion)
130 struct nand_chip *chip = mtd_to_nand(mtd);
131 struct nand_ecc_ctrl *ecc = &chip->ecc;
133 if (section)
134 return -ERANGE;
136 oobregion->length = mtd->oobsize - ecc->total - 2;
137 oobregion->offset = 2;
139 return 0;
142 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
143 .ecc = nand_ooblayout_ecc_lp,
144 .free = nand_ooblayout_free_lp,
146 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
 149  * Support the old "large page" layout used for 1-bit Hamming ECC where the
 150  * ECC bytes are placed at a fixed offset.
152 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
153 struct mtd_oob_region *oobregion)
155 struct nand_chip *chip = mtd_to_nand(mtd);
156 struct nand_ecc_ctrl *ecc = &chip->ecc;
158 if (section)
159 return -ERANGE;
161 switch (mtd->oobsize) {
162 case 64:
163 oobregion->offset = 40;
164 break;
165 case 128:
166 oobregion->offset = 80;
167 break;
168 default:
169 return -EINVAL;
172 oobregion->length = ecc->total;
173 if (oobregion->offset + oobregion->length > mtd->oobsize)
174 return -ERANGE;
176 return 0;
179 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
180 struct mtd_oob_region *oobregion)
182 struct nand_chip *chip = mtd_to_nand(mtd);
183 struct nand_ecc_ctrl *ecc = &chip->ecc;
184 int ecc_offset = 0;
186 if (section < 0 || section > 1)
187 return -ERANGE;
189 switch (mtd->oobsize) {
190 case 64:
191 ecc_offset = 40;
192 break;
193 case 128:
194 ecc_offset = 80;
195 break;
196 default:
197 return -EINVAL;
200 if (section == 0) {
201 oobregion->offset = 2;
202 oobregion->length = ecc_offset - 2;
203 } else {
204 oobregion->offset = ecc_offset + ecc->total;
205 oobregion->length = mtd->oobsize - oobregion->offset;
208 return 0;
211 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
212 .ecc = nand_ooblayout_ecc_lp_hamming,
213 .free = nand_ooblayout_free_lp_hamming,
216 static int check_offs_len(struct mtd_info *mtd,
217 loff_t ofs, uint64_t len)
219 struct nand_chip *chip = mtd_to_nand(mtd);
220 int ret = 0;
222 /* Start address must align on block boundary */
223 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: unaligned address\n", __func__);
225 ret = -EINVAL;
228 /* Length must align on block boundary */
229 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
230 pr_debug("%s: length not block aligned\n", __func__);
231 ret = -EINVAL;
234 return ret;
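/*
 * Quick illustration of the alignment math used in check_offs_len() above:
 * with a 128KiB erase block, phys_erase_shift is 17, so any offset or length
 * with non-zero low 17 bits is rejected. The helper name and its standalone
 * form are hypothetical; it is only a sketch of the bitmask test.
 */
static inline bool __maybe_unused example_is_block_aligned(loff_t ofs,
                                                           int phys_erase_shift)
{
        return !(ofs & ((1ULL << phys_erase_shift) - 1));
}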
238 * nand_release_device - [GENERIC] release chip
239 * @mtd: MTD device structure
241 * Release chip lock and wake up anyone waiting on the device.
243 static void nand_release_device(struct mtd_info *mtd)
245 struct nand_chip *chip = mtd_to_nand(mtd);
247 /* Release the controller and the chip */
248 spin_lock(&chip->controller->lock);
249 chip->controller->active = NULL;
250 chip->state = FL_READY;
251 wake_up(&chip->controller->wq);
252 spin_unlock(&chip->controller->lock);
256 * nand_read_byte - [DEFAULT] read one byte from the chip
257 * @mtd: MTD device structure
259 * Default read function for 8bit buswidth
261 static uint8_t nand_read_byte(struct mtd_info *mtd)
263 struct nand_chip *chip = mtd_to_nand(mtd);
264 return readb(chip->IO_ADDR_R);
268 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
269 * @mtd: MTD device structure
271 * Default read function for 16bit buswidth with endianness conversion.
274 static uint8_t nand_read_byte16(struct mtd_info *mtd)
276 struct nand_chip *chip = mtd_to_nand(mtd);
277 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
281 * nand_read_word - [DEFAULT] read one word from the chip
282 * @mtd: MTD device structure
284 * Default read function for 16bit buswidth without endianness conversion.
286 static u16 nand_read_word(struct mtd_info *mtd)
288 struct nand_chip *chip = mtd_to_nand(mtd);
289 return readw(chip->IO_ADDR_R);
293 * nand_select_chip - [DEFAULT] control CE line
294 * @mtd: MTD device structure
295 * @chipnr: chipnumber to select, -1 for deselect
297 * Default select function for 1 chip devices.
299 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
301 struct nand_chip *chip = mtd_to_nand(mtd);
303 switch (chipnr) {
304 case -1:
305 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
306 break;
307 case 0:
308 break;
310 default:
311 BUG();
316 * nand_write_byte - [DEFAULT] write single byte to chip
317 * @mtd: MTD device structure
318 * @byte: value to write
320 * Default function to write a byte to I/O[7:0]
322 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
324 struct nand_chip *chip = mtd_to_nand(mtd);
326 chip->write_buf(mtd, &byte, 1);
330 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
331 * @mtd: MTD device structure
332 * @byte: value to write
334 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
336 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
338 struct nand_chip *chip = mtd_to_nand(mtd);
339 uint16_t word = byte;
342 * It's not entirely clear what should happen to I/O[15:8] when writing
343 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
345 * When the host supports a 16-bit bus width, only data is
346 * transferred at the 16-bit width. All address and command line
347 * transfers shall use only the lower 8-bits of the data bus. During
348 * command transfers, the host may place any value on the upper
349 * 8-bits of the data bus. During address transfers, the host shall
350 * set the upper 8-bits of the data bus to 00h.
352 * One user of the write_byte callback is nand_onfi_set_features. The
353 * four parameters are specified to be written to I/O[7:0], but this is
354 * neither an address nor a command transfer. Let's assume a 0 on the
355 * upper I/O lines is OK.
357 chip->write_buf(mtd, (uint8_t *)&word, 2);
361 * nand_write_buf - [DEFAULT] write buffer to chip
362 * @mtd: MTD device structure
363 * @buf: data buffer
364 * @len: number of bytes to write
366 * Default write function for 8bit buswidth.
368 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
370 struct nand_chip *chip = mtd_to_nand(mtd);
372 iowrite8_rep(chip->IO_ADDR_W, buf, len);
376 * nand_read_buf - [DEFAULT] read chip data into buffer
377 * @mtd: MTD device structure
 378  * @buf: buffer to store data
379 * @len: number of bytes to read
381 * Default read function for 8bit buswidth.
383 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
385 struct nand_chip *chip = mtd_to_nand(mtd);
387 ioread8_rep(chip->IO_ADDR_R, buf, len);
391 * nand_write_buf16 - [DEFAULT] write buffer to chip
392 * @mtd: MTD device structure
393 * @buf: data buffer
394 * @len: number of bytes to write
396 * Default write function for 16bit buswidth.
398 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
400 struct nand_chip *chip = mtd_to_nand(mtd);
401 u16 *p = (u16 *) buf;
403 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
407 * nand_read_buf16 - [DEFAULT] read chip data into buffer
408 * @mtd: MTD device structure
 409  * @buf: buffer to store data
410 * @len: number of bytes to read
412 * Default read function for 16bit buswidth.
414 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
416 struct nand_chip *chip = mtd_to_nand(mtd);
417 u16 *p = (u16 *) buf;
419 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
423 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
424 * @mtd: MTD device structure
425 * @ofs: offset from device start
 427  * Check if the block is bad.
429 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
431 int page, page_end, res;
432 struct nand_chip *chip = mtd_to_nand(mtd);
433 u8 bad;
435 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
436 ofs += mtd->erasesize - mtd->writesize;
438 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
439 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
441 for (; page < page_end; page++) {
442 res = chip->ecc.read_oob(mtd, chip, page);
443 if (res)
444 return res;
446 bad = chip->oob_poi[chip->badblockpos];
448 if (likely(chip->badblockbits == 8))
449 res = bad != 0xFF;
450 else
451 res = hweight8(bad) < chip->badblockbits;
452 if (res)
453 return res;
456 return 0;
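/*
 * Illustrative sketch of how an MTD user typically consumes the bad-block
 * information produced by nand_block_bad() above, through the generic
 * mtd_block_isbad() wrapper rather than calling chip->block_bad() directly.
 * The function name and the counting logic are examples, not part of this
 * driver.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
        loff_t ofs;
        int bad = 0;

        for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
                int ret = mtd_block_isbad(mtd, ofs);

                if (ret < 0)
                        return ret;
                if (ret)
                        bad++;
        }

        return bad;
}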
460 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
461 * @mtd: MTD device structure
462 * @ofs: offset from device start
464 * This is the default implementation, which can be overridden by a hardware
465 * specific driver. It provides the details for writing a bad block marker to a
466 * block.
468 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
470 struct nand_chip *chip = mtd_to_nand(mtd);
471 struct mtd_oob_ops ops;
472 uint8_t buf[2] = { 0, 0 };
473 int ret = 0, res, i = 0;
475 memset(&ops, 0, sizeof(ops));
476 ops.oobbuf = buf;
477 ops.ooboffs = chip->badblockpos;
478 if (chip->options & NAND_BUSWIDTH_16) {
479 ops.ooboffs &= ~0x01;
480 ops.len = ops.ooblen = 2;
481 } else {
482 ops.len = ops.ooblen = 1;
484 ops.mode = MTD_OPS_PLACE_OOB;
486 /* Write to first/last page(s) if necessary */
487 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
488 ofs += mtd->erasesize - mtd->writesize;
489 do {
490 res = nand_do_write_oob(mtd, ofs, &ops);
491 if (!ret)
492 ret = res;
494 i++;
495 ofs += mtd->writesize;
496 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
498 return ret;
502 * nand_block_markbad_lowlevel - mark a block bad
503 * @mtd: MTD device structure
504 * @ofs: offset from device start
506 * This function performs the generic NAND bad block marking steps (i.e., bad
507 * block table(s) and/or marker(s)). We only allow the hardware driver to
508 * specify how to write bad block markers to OOB (chip->block_markbad).
510 * We try operations in the following order:
512 * (1) erase the affected block, to allow OOB marker to be written cleanly
513 * (2) write bad block marker to OOB area of affected block (unless flag
514 * NAND_BBT_NO_OOB_BBM is present)
515 * (3) update the BBT
517 * Note that we retain the first error encountered in (2) or (3), finish the
518 * procedures, and dump the error in the end.
520 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
522 struct nand_chip *chip = mtd_to_nand(mtd);
523 int res, ret = 0;
525 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
526 struct erase_info einfo;
528 /* Attempt erase before marking OOB */
529 memset(&einfo, 0, sizeof(einfo));
530 einfo.mtd = mtd;
531 einfo.addr = ofs;
532 einfo.len = 1ULL << chip->phys_erase_shift;
533 nand_erase_nand(mtd, &einfo, 0);
535 /* Write bad block marker to OOB */
536 nand_get_device(mtd, FL_WRITING);
537 ret = chip->block_markbad(mtd, ofs);
538 nand_release_device(mtd);
541 /* Mark block bad in BBT */
542 if (chip->bbt) {
543 res = nand_markbad_bbt(mtd, ofs);
544 if (!ret)
545 ret = res;
548 if (!ret)
549 mtd->ecc_stats.badblocks++;
551 return ret;
555 * nand_check_wp - [GENERIC] check if the chip is write protected
556 * @mtd: MTD device structure
 558  * Check if the device is write protected. The function expects that the
 559  * device is already selected.
561 static int nand_check_wp(struct mtd_info *mtd)
563 struct nand_chip *chip = mtd_to_nand(mtd);
564 u8 status;
565 int ret;
567 /* Broken xD cards report WP despite being writable */
568 if (chip->options & NAND_BROKEN_XD)
569 return 0;
571 /* Check the WP bit */
572 ret = nand_status_op(chip, &status);
573 if (ret)
574 return ret;
576 return status & NAND_STATUS_WP ? 0 : 1;
580 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
581 * @mtd: MTD device structure
582 * @ofs: offset from device start
584 * Check if the block is marked as reserved.
586 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
588 struct nand_chip *chip = mtd_to_nand(mtd);
590 if (!chip->bbt)
591 return 0;
592 /* Return info from the table */
593 return nand_isreserved_bbt(mtd, ofs);
597 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
598 * @mtd: MTD device structure
599 * @ofs: offset from device start
 600  * @allowbbt: 1 if it is allowed to access the BBT area
 602  * Check if the block is bad, either by reading the bad block table or by
 603  * calling the scan function.
605 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
607 struct nand_chip *chip = mtd_to_nand(mtd);
609 if (!chip->bbt)
610 return chip->block_bad(mtd, ofs);
612 /* Return info from the table */
613 return nand_isbad_bbt(mtd, ofs, allowbbt);
617 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
618 * @mtd: MTD device structure
619 * @timeo: Timeout
621 * Helper function for nand_wait_ready used when needing to wait in interrupt
622 * context.
624 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
626 struct nand_chip *chip = mtd_to_nand(mtd);
627 int i;
629 /* Wait for the device to get ready */
630 for (i = 0; i < timeo; i++) {
631 if (chip->dev_ready(mtd))
632 break;
633 touch_softlockup_watchdog();
634 mdelay(1);
639 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
640 * @mtd: MTD device structure
642 * Wait for the ready pin after a command, and warn if a timeout occurs.
644 void nand_wait_ready(struct mtd_info *mtd)
646 struct nand_chip *chip = mtd_to_nand(mtd);
647 unsigned long timeo = 400;
649 if (in_interrupt() || oops_in_progress)
650 return panic_nand_wait_ready(mtd, timeo);
652 /* Wait until command is processed or timeout occurs */
653 timeo = jiffies + msecs_to_jiffies(timeo);
654 do {
655 if (chip->dev_ready(mtd))
656 return;
657 cond_resched();
658 } while (time_before(jiffies, timeo));
660 if (!chip->dev_ready(mtd))
661 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
663 EXPORT_SYMBOL_GPL(nand_wait_ready);
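/*
 * Minimal sketch (hypothetical controller) of a ->dev_ready() hook that
 * nand_wait_ready() above would end up polling: it reads a status register
 * and reports the R/B state. The register offset, ready bit, and priv
 * structure are invented for the example; readl() comes from <linux/io.h>,
 * which is already included above.
 */
struct example_nand_ctrl {
        struct nand_chip chip;
        void __iomem *regs;             /* assumption: MMIO register block */
};

#define EXAMPLE_STATUS_REG      0x04    /* hypothetical register offset */
#define EXAMPLE_STATUS_RDY      BIT(0)  /* hypothetical ready bit */

static int example_dev_ready(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct example_nand_ctrl *ctrl =
                container_of(chip, struct example_nand_ctrl, chip);

        return !!(readl(ctrl->regs + EXAMPLE_STATUS_REG) & EXAMPLE_STATUS_RDY);
}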
666 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
667 * @mtd: MTD device structure
668 * @timeo: Timeout in ms
670 * Wait for status ready (i.e. command done) or timeout.
672 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
674 register struct nand_chip *chip = mtd_to_nand(mtd);
675 int ret;
677 timeo = jiffies + msecs_to_jiffies(timeo);
678 do {
679 u8 status;
681 ret = nand_read_data_op(chip, &status, sizeof(status), true);
682 if (ret)
683 return;
685 if (status & NAND_STATUS_READY)
686 break;
687 touch_softlockup_watchdog();
688 } while (time_before(jiffies, timeo));
692 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
693 * @chip: NAND chip structure
694 * @timeout_ms: Timeout in ms
696 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 697  * If that does not happen within the specified timeout, -ETIMEDOUT is
698 * returned.
700 * This helper is intended to be used when the controller does not have access
701 * to the NAND R/B pin.
703 * Be aware that calling this helper from an ->exec_op() implementation means
704 * ->exec_op() must be re-entrant.
706 * Return 0 if the NAND chip is ready, a negative error otherwise.
708 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
710 u8 status = 0;
711 int ret;
713 if (!chip->exec_op)
714 return -ENOTSUPP;
716 ret = nand_status_op(chip, NULL);
717 if (ret)
718 return ret;
720 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
721 do {
722 ret = nand_read_data_op(chip, &status, sizeof(status), true);
723 if (ret)
724 break;
726 if (status & NAND_STATUS_READY)
727 break;
730 * Typical lowest execution time for a tR on most NANDs is 10us,
 731  * use this as the polling delay before doing something smarter (i.e.
732 * deriving a delay from the timeout value, timeout_ms/ratio).
734 udelay(10);
735 } while (time_before(jiffies, timeout_ms));
738 * We have to exit READ_STATUS mode in order to read real data on the
739 * bus in case the WAITRDY instruction is preceding a DATA_IN
740 * instruction.
742 nand_exit_status_op(chip);
744 if (ret)
745 return ret;
747 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
749 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
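/*
 * Sketch of how a controller without access to the R/B pin could handle a
 * WAITRDY instruction inside its ->exec_op() implementation by falling back
 * to nand_soft_waitrdy() above. The surrounding instruction dispatch loop is
 * hypothetical and heavily trimmed; only the WAITRDY case is shown, and the
 * instruction field names are assumed to match <linux/mtd/rawnand.h>.
 */
static int example_exec_waitrdy(struct nand_chip *chip,
                                const struct nand_op_instr *instr)
{
        if (instr->type != NAND_OP_WAITRDY_INSTR)
                return -EINVAL;

        /*
         * nand_soft_waitrdy() polls the STATUS register, so ->exec_op()
         * must be re-entrant, as noted in the kernel-doc above.
         */
        return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
}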
752 * nand_command - [DEFAULT] Send command to NAND device
753 * @mtd: MTD device structure
754 * @command: the command to be sent
755 * @column: the column address for this command, -1 if none
756 * @page_addr: the page address for this command, -1 if none
758 * Send command to NAND device. This function is used for small page devices
759 * (512 Bytes per page).
761 static void nand_command(struct mtd_info *mtd, unsigned int command,
762 int column, int page_addr)
764 register struct nand_chip *chip = mtd_to_nand(mtd);
765 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
767 /* Write out the command to the device */
768 if (command == NAND_CMD_SEQIN) {
769 int readcmd;
771 if (column >= mtd->writesize) {
772 /* OOB area */
773 column -= mtd->writesize;
774 readcmd = NAND_CMD_READOOB;
775 } else if (column < 256) {
776 /* First 256 bytes --> READ0 */
777 readcmd = NAND_CMD_READ0;
778 } else {
779 column -= 256;
780 readcmd = NAND_CMD_READ1;
782 chip->cmd_ctrl(mtd, readcmd, ctrl);
783 ctrl &= ~NAND_CTRL_CHANGE;
785 if (command != NAND_CMD_NONE)
786 chip->cmd_ctrl(mtd, command, ctrl);
788 /* Address cycle, when necessary */
789 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
790 /* Serially input address */
791 if (column != -1) {
792 /* Adjust columns for 16 bit buswidth */
793 if (chip->options & NAND_BUSWIDTH_16 &&
794 !nand_opcode_8bits(command))
795 column >>= 1;
796 chip->cmd_ctrl(mtd, column, ctrl);
797 ctrl &= ~NAND_CTRL_CHANGE;
799 if (page_addr != -1) {
800 chip->cmd_ctrl(mtd, page_addr, ctrl);
801 ctrl &= ~NAND_CTRL_CHANGE;
802 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
803 if (chip->options & NAND_ROW_ADDR_3)
804 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
806 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
 809          * Program and erase have their own busy handlers; status and sequential
 810          * input need no delay.
812 switch (command) {
814 case NAND_CMD_NONE:
815 case NAND_CMD_PAGEPROG:
816 case NAND_CMD_ERASE1:
817 case NAND_CMD_ERASE2:
818 case NAND_CMD_SEQIN:
819 case NAND_CMD_STATUS:
820 case NAND_CMD_READID:
821 case NAND_CMD_SET_FEATURES:
822 return;
824 case NAND_CMD_RESET:
825 if (chip->dev_ready)
826 break;
827 udelay(chip->chip_delay);
828 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
829 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
830 chip->cmd_ctrl(mtd,
831 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
 832                  /* EZ-NAND can take up to 250ms as per ONFI v4.0 */
833 nand_wait_status_ready(mtd, 250);
834 return;
836 /* This applies to read commands */
837 case NAND_CMD_READ0:
839 * READ0 is sometimes used to exit GET STATUS mode. When this
840 * is the case no address cycles are requested, and we can use
841 * this information to detect that we should not wait for the
842 * device to be ready.
844 if (column == -1 && page_addr == -1)
845 return;
847 default:
849 * If we don't have access to the busy pin, we apply the given
850 * command delay
852 if (!chip->dev_ready) {
853 udelay(chip->chip_delay);
854 return;
858 * Apply this short delay always to ensure that we do wait tWB in
859 * any case on any machine.
861 ndelay(100);
863 nand_wait_ready(mtd);
866 static void nand_ccs_delay(struct nand_chip *chip)
869 * The controller already takes care of waiting for tCCS when the RNDIN
870 * or RNDOUT command is sent, return directly.
872 if (!(chip->options & NAND_WAIT_TCCS))
873 return;
876 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
877 * (which should be safe for all NANDs).
879 if (chip->setup_data_interface)
880 ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
881 else
882 ndelay(500);
886 * nand_command_lp - [DEFAULT] Send command to NAND large page device
887 * @mtd: MTD device structure
888 * @command: the command to be sent
889 * @column: the column address for this command, -1 if none
890 * @page_addr: the page address for this command, -1 if none
892 * Send command to NAND device. This is the version for the new large page
893 * devices. We don't have the separate regions as we have in the small page
894 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
896 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
897 int column, int page_addr)
899 register struct nand_chip *chip = mtd_to_nand(mtd);
901 /* Emulate NAND_CMD_READOOB */
902 if (command == NAND_CMD_READOOB) {
903 column += mtd->writesize;
904 command = NAND_CMD_READ0;
907 /* Command latch cycle */
908 if (command != NAND_CMD_NONE)
909 chip->cmd_ctrl(mtd, command,
910 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
912 if (column != -1 || page_addr != -1) {
913 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
915 /* Serially input address */
916 if (column != -1) {
917 /* Adjust columns for 16 bit buswidth */
918 if (chip->options & NAND_BUSWIDTH_16 &&
919 !nand_opcode_8bits(command))
920 column >>= 1;
921 chip->cmd_ctrl(mtd, column, ctrl);
922 ctrl &= ~NAND_CTRL_CHANGE;
924 /* Only output a single addr cycle for 8bits opcodes. */
925 if (!nand_opcode_8bits(command))
926 chip->cmd_ctrl(mtd, column >> 8, ctrl);
928 if (page_addr != -1) {
929 chip->cmd_ctrl(mtd, page_addr, ctrl);
930 chip->cmd_ctrl(mtd, page_addr >> 8,
931 NAND_NCE | NAND_ALE);
932 if (chip->options & NAND_ROW_ADDR_3)
933 chip->cmd_ctrl(mtd, page_addr >> 16,
934 NAND_NCE | NAND_ALE);
937 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
 940          * Program and erase have their own busy handlers; status and sequential
 941          * input need no delay.
943 switch (command) {
945 case NAND_CMD_NONE:
946 case NAND_CMD_CACHEDPROG:
947 case NAND_CMD_PAGEPROG:
948 case NAND_CMD_ERASE1:
949 case NAND_CMD_ERASE2:
950 case NAND_CMD_SEQIN:
951 case NAND_CMD_STATUS:
952 case NAND_CMD_READID:
953 case NAND_CMD_SET_FEATURES:
954 return;
956 case NAND_CMD_RNDIN:
957 nand_ccs_delay(chip);
958 return;
960 case NAND_CMD_RESET:
961 if (chip->dev_ready)
962 break;
963 udelay(chip->chip_delay);
964 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
965 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
966 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
967 NAND_NCE | NAND_CTRL_CHANGE);
 968                  /* EZ-NAND can take up to 250ms as per ONFI v4.0 */
969 nand_wait_status_ready(mtd, 250);
970 return;
972 case NAND_CMD_RNDOUT:
973 /* No ready / busy check necessary */
974 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
975 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
976 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
977 NAND_NCE | NAND_CTRL_CHANGE);
979 nand_ccs_delay(chip);
980 return;
982 case NAND_CMD_READ0:
984 * READ0 is sometimes used to exit GET STATUS mode. When this
985 * is the case no address cycles are requested, and we can use
986 * this information to detect that READSTART should not be
987 * issued.
989 if (column == -1 && page_addr == -1)
990 return;
992 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
993 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
994 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
995 NAND_NCE | NAND_CTRL_CHANGE);
997 /* This applies to read commands */
998 default:
1000 * If we don't have access to the busy pin, we apply the given
1001 * command delay.
1003 if (!chip->dev_ready) {
1004 udelay(chip->chip_delay);
1005 return;
1010 * Apply this short delay always to ensure that we do wait tWB in
1011 * any case on any machine.
1013 ndelay(100);
1015 nand_wait_ready(mtd);
1019 * panic_nand_get_device - [GENERIC] Get chip for selected access
1020 * @chip: the nand chip descriptor
1021 * @mtd: MTD device structure
1022 * @new_state: the state which is requested
1024 * Used when in panic, no locks are taken.
1026 static void panic_nand_get_device(struct nand_chip *chip,
1027 struct mtd_info *mtd, int new_state)
1029 /* Hardware controller shared among independent devices */
1030 chip->controller->active = chip;
1031 chip->state = new_state;
1035 * nand_get_device - [GENERIC] Get chip for selected access
1036 * @mtd: MTD device structure
1037 * @new_state: the state which is requested
1039 * Get the device and lock it for exclusive access
1041 static int
1042 nand_get_device(struct mtd_info *mtd, int new_state)
1044 struct nand_chip *chip = mtd_to_nand(mtd);
1045 spinlock_t *lock = &chip->controller->lock;
1046 wait_queue_head_t *wq = &chip->controller->wq;
1047 DECLARE_WAITQUEUE(wait, current);
1048 retry:
1049 spin_lock(lock);
1051 /* Hardware controller shared among independent devices */
1052 if (!chip->controller->active)
1053 chip->controller->active = chip;
1055 if (chip->controller->active == chip && chip->state == FL_READY) {
1056 chip->state = new_state;
1057 spin_unlock(lock);
1058 return 0;
1060 if (new_state == FL_PM_SUSPENDED) {
1061 if (chip->controller->active->state == FL_PM_SUSPENDED) {
1062 chip->state = FL_PM_SUSPENDED;
1063 spin_unlock(lock);
1064 return 0;
1067 set_current_state(TASK_UNINTERRUPTIBLE);
1068 add_wait_queue(wq, &wait);
1069 spin_unlock(lock);
1070 schedule();
1071 remove_wait_queue(wq, &wait);
1072 goto retry;
1076 * panic_nand_wait - [GENERIC] wait until the command is done
1077 * @mtd: MTD device structure
1078 * @chip: NAND chip structure
1079 * @timeo: timeout
1081 * Wait for command done. This is a helper function for nand_wait used when
1082 * we are in interrupt context. May happen when in panic and trying to write
1083 * an oops through mtdoops.
1085 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
1086 unsigned long timeo)
1088 int i;
1089 for (i = 0; i < timeo; i++) {
1090 if (chip->dev_ready) {
1091 if (chip->dev_ready(mtd))
1092 break;
1093 } else {
1094 int ret;
1095 u8 status;
1097 ret = nand_read_data_op(chip, &status, sizeof(status),
1098 true);
1099 if (ret)
1100 return;
1102 if (status & NAND_STATUS_READY)
1103 break;
1105 mdelay(1);
1110 * nand_wait - [DEFAULT] wait until the command is done
1111 * @mtd: MTD device structure
1112 * @chip: NAND chip structure
1114 * Wait for command done. This applies to erase and program only.
1116 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1119 unsigned long timeo = 400;
1120 u8 status;
1121 int ret;
1124 * Apply this short delay always to ensure that we do wait tWB in any
1125 * case on any machine.
1127 ndelay(100);
1129 ret = nand_status_op(chip, NULL);
1130 if (ret)
1131 return ret;
1133 if (in_interrupt() || oops_in_progress)
1134 panic_nand_wait(mtd, chip, timeo);
1135 else {
1136 timeo = jiffies + msecs_to_jiffies(timeo);
1137 do {
1138 if (chip->dev_ready) {
1139 if (chip->dev_ready(mtd))
1140 break;
1141 } else {
1142 ret = nand_read_data_op(chip, &status,
1143 sizeof(status), true);
1144 if (ret)
1145 return ret;
1147 if (status & NAND_STATUS_READY)
1148 break;
1150 cond_resched();
1151 } while (time_before(jiffies, timeo));
1154 ret = nand_read_data_op(chip, &status, sizeof(status), true);
1155 if (ret)
1156 return ret;
1158          /* This can happen in case of a timeout or a buggy dev_ready */
1159 WARN_ON(!(status & NAND_STATUS_READY));
1160 return status;
1164 * nand_reset_data_interface - Reset data interface and timings
1165 * @chip: The NAND chip
1166 * @chipnr: Internal die id
1168 * Reset the Data interface and timings to ONFI mode 0.
1170 * Returns 0 for success or negative error code otherwise.
1172 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1174 struct mtd_info *mtd = nand_to_mtd(chip);
1175 int ret;
1177 if (!chip->setup_data_interface)
1178 return 0;
1181 * The ONFI specification says:
1183 * To transition from NV-DDR or NV-DDR2 to the SDR data
1184 * interface, the host shall use the Reset (FFh) command
1185 * using SDR timing mode 0. A device in any timing mode is
1186 * required to recognize Reset (FFh) command issued in SDR
1187 * timing mode 0.
1190 * Configure the data interface in SDR mode and set the
1191 * timings to timing mode 0.
1194 onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
1195 ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
1196 if (ret)
1197 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1199 return ret;
1203 * nand_setup_data_interface - Setup the best data interface and timings
1204 * @chip: The NAND chip
1205 * @chipnr: Internal die id
1207 * Find and configure the best data interface and NAND timings supported by
1208 * the chip and the driver.
1209 * First tries to retrieve supported timing modes from ONFI information,
1210 * and if the NAND chip does not support ONFI, relies on the
1211 * ->onfi_timing_mode_default specified in the nand_ids table.
1213 * Returns 0 for success or negative error code otherwise.
1215 static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1217 struct mtd_info *mtd = nand_to_mtd(chip);
1218 int ret;
1220 if (!chip->setup_data_interface)
1221 return 0;
1224 * Ensure the timing mode has been changed on the chip side
1225 * before changing timings on the controller side.
1227 if (chip->onfi_version &&
1228 (le16_to_cpu(chip->onfi_params.opt_cmd) &
1229 ONFI_OPT_CMD_SET_GET_FEATURES)) {
1230 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1231 chip->onfi_timing_mode_default,
1234 ret = chip->onfi_set_features(mtd, chip,
1235 ONFI_FEATURE_ADDR_TIMING_MODE,
1236 tmode_param);
1237 if (ret)
1238 goto err;
1241 ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
1242 err:
1243 return ret;
1247 * nand_init_data_interface - find the best data interface and timings
1248 * @chip: The NAND chip
1250 * Find the best data interface and NAND timings supported by the chip
1251 * and the driver.
1252 * First tries to retrieve supported timing modes from ONFI information,
1253 * and if the NAND chip does not support ONFI, relies on the
1254 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1255 * function nand_chip->data_interface is initialized with the best timing mode
1256 * available.
1258 * Returns 0 for success or negative error code otherwise.
1260 static int nand_init_data_interface(struct nand_chip *chip)
1262 struct mtd_info *mtd = nand_to_mtd(chip);
1263 int modes, mode, ret;
1265 if (!chip->setup_data_interface)
1266 return 0;
1269 * First try to identify the best timings from ONFI parameters and
1270 * if the NAND does not support ONFI, fallback to the default ONFI
1271 * timing mode.
1273 modes = onfi_get_async_timing_mode(chip);
1274 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1275 if (!chip->onfi_timing_mode_default)
1276 return 0;
1278 modes = GENMASK(chip->onfi_timing_mode_default, 0);
1282 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1283 ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
1284 if (ret)
1285 continue;
1288 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
1289 * controller supports the requested timings.
1291 ret = chip->setup_data_interface(mtd,
1292 NAND_DATA_IFACE_CHECK_ONLY,
1293 &chip->data_interface);
1294 if (!ret) {
1295 chip->onfi_timing_mode_default = mode;
1296 break;
1300 return 0;
1304 * nand_fill_column_cycles - fill the column cycles of an address
1305 * @chip: The NAND chip
1306 * @addrs: Array of address cycles to fill
1307 * @offset_in_page: The offset in the page
1309 * Fills the first or the first two bytes of the @addrs field depending
1310 * on the NAND bus width and the page size.
1312 * Returns the number of cycles needed to encode the column, or a negative
1313 * error code in case one of the arguments is invalid.
1315 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
1316 unsigned int offset_in_page)
1318 struct mtd_info *mtd = nand_to_mtd(chip);
1320 /* Make sure the offset is less than the actual page size. */
1321 if (offset_in_page > mtd->writesize + mtd->oobsize)
1322 return -EINVAL;
1325 * On small page NANDs, there's a dedicated command to access the OOB
1326 * area, and the column address is relative to the start of the OOB
1327          * area, not the start of the page. Adjust the address accordingly.
1329 if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
1330 offset_in_page -= mtd->writesize;
1333 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
1334 * wide, then it must be divided by 2.
1336 if (chip->options & NAND_BUSWIDTH_16) {
1337 if (WARN_ON(offset_in_page % 2))
1338 return -EINVAL;
1340 offset_in_page /= 2;
1343 addrs[0] = offset_in_page;
1346 * Small page NANDs use 1 cycle for the columns, while large page NANDs
1347 * need 2
1349 if (mtd->writesize <= 512)
1350 return 1;
1352 addrs[1] = offset_in_page >> 8;
1354 return 2;
1357 static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1358 unsigned int offset_in_page, void *buf,
1359 unsigned int len)
1361 struct mtd_info *mtd = nand_to_mtd(chip);
1362 const struct nand_sdr_timings *sdr =
1363 nand_get_sdr_timings(&chip->data_interface);
1364 u8 addrs[4];
1365 struct nand_op_instr instrs[] = {
1366 NAND_OP_CMD(NAND_CMD_READ0, 0),
1367 NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
1368 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1369 PSEC_TO_NSEC(sdr->tRR_min)),
1370 NAND_OP_DATA_IN(len, buf, 0),
1372 struct nand_operation op = NAND_OPERATION(instrs);
1373 int ret;
1375 /* Drop the DATA_IN instruction if len is set to 0. */
1376 if (!len)
1377 op.ninstrs--;
1379 if (offset_in_page >= mtd->writesize)
1380 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1381 else if (offset_in_page >= 256 &&
1382 !(chip->options & NAND_BUSWIDTH_16))
1383 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1385 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1386 if (ret < 0)
1387 return ret;
1389 addrs[1] = page;
1390 addrs[2] = page >> 8;
1392 if (chip->options & NAND_ROW_ADDR_3) {
1393 addrs[3] = page >> 16;
1394 instrs[1].ctx.addr.naddrs++;
1397 return nand_exec_op(chip, &op);
1400 static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1401 unsigned int offset_in_page, void *buf,
1402 unsigned int len)
1404 const struct nand_sdr_timings *sdr =
1405 nand_get_sdr_timings(&chip->data_interface);
1406 u8 addrs[5];
1407 struct nand_op_instr instrs[] = {
1408 NAND_OP_CMD(NAND_CMD_READ0, 0),
1409 NAND_OP_ADDR(4, addrs, 0),
1410 NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
1411 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1412 PSEC_TO_NSEC(sdr->tRR_min)),
1413 NAND_OP_DATA_IN(len, buf, 0),
1415 struct nand_operation op = NAND_OPERATION(instrs);
1416 int ret;
1418 /* Drop the DATA_IN instruction if len is set to 0. */
1419 if (!len)
1420 op.ninstrs--;
1422 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1423 if (ret < 0)
1424 return ret;
1426 addrs[2] = page;
1427 addrs[3] = page >> 8;
1429 if (chip->options & NAND_ROW_ADDR_3) {
1430 addrs[4] = page >> 16;
1431 instrs[1].ctx.addr.naddrs++;
1434 return nand_exec_op(chip, &op);
1438 * nand_read_page_op - Do a READ PAGE operation
1439 * @chip: The NAND chip
1440 * @page: page to read
1441 * @offset_in_page: offset within the page
1442 * @buf: buffer used to store the data
1443 * @len: length of the buffer
1445 * This function issues a READ PAGE operation.
1446 * This function does not select/unselect the CS line.
1448 * Returns 0 on success, a negative error code otherwise.
1450 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1451 unsigned int offset_in_page, void *buf, unsigned int len)
1453 struct mtd_info *mtd = nand_to_mtd(chip);
1455 if (len && !buf)
1456 return -EINVAL;
1458 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1459 return -EINVAL;
1461 if (chip->exec_op) {
1462 if (mtd->writesize > 512)
1463 return nand_lp_exec_read_page_op(chip, page,
1464 offset_in_page, buf,
1465 len);
1467 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1468 buf, len);
1471 chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
1472 if (len)
1473 chip->read_buf(mtd, buf, len);
1475 return 0;
1477 EXPORT_SYMBOL_GPL(nand_read_page_op);
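/*
 * Usage sketch for nand_read_page_op() above: read one full raw page (no ECC
 * correction applied) starting at column 0. The caller is expected to have
 * selected the die with chip->select_chip() beforehand; the wrapper name and
 * buffer handling are illustrative only.
 */
static int __maybe_unused example_read_raw_page(struct nand_chip *chip,
                                                unsigned int page, void *buf)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        /* Read the whole data area of the page in one go. */
        return nand_read_page_op(chip, page, 0, buf, mtd->writesize);
}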
1480 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1481 * @chip: The NAND chip
1482 * @page: parameter page to read
1483 * @buf: buffer used to store the data
1484 * @len: length of the buffer
1486 * This function issues a READ PARAMETER PAGE operation.
1487 * This function does not select/unselect the CS line.
1489 * Returns 0 on success, a negative error code otherwise.
1491 static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1492 unsigned int len)
1494 struct mtd_info *mtd = nand_to_mtd(chip);
1495 unsigned int i;
1496 u8 *p = buf;
1498 if (len && !buf)
1499 return -EINVAL;
1501 if (chip->exec_op) {
1502 const struct nand_sdr_timings *sdr =
1503 nand_get_sdr_timings(&chip->data_interface);
1504 struct nand_op_instr instrs[] = {
1505 NAND_OP_CMD(NAND_CMD_PARAM, 0),
1506 NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1507 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1508 PSEC_TO_NSEC(sdr->tRR_min)),
1509 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1511 struct nand_operation op = NAND_OPERATION(instrs);
1513 /* Drop the DATA_IN instruction if len is set to 0. */
1514 if (!len)
1515 op.ninstrs--;
1517 return nand_exec_op(chip, &op);
1520 chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
1521 for (i = 0; i < len; i++)
1522 p[i] = chip->read_byte(mtd);
1524 return 0;
1528 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1529 * @chip: The NAND chip
1530 * @offset_in_page: offset within the page
1531 * @buf: buffer used to store the data
1532 * @len: length of the buffer
1533 * @force_8bit: force 8-bit bus access
1535 * This function issues a CHANGE READ COLUMN operation.
1536 * This function does not select/unselect the CS line.
1538 * Returns 0 on success, a negative error code otherwise.
1540 int nand_change_read_column_op(struct nand_chip *chip,
1541 unsigned int offset_in_page, void *buf,
1542 unsigned int len, bool force_8bit)
1544 struct mtd_info *mtd = nand_to_mtd(chip);
1546 if (len && !buf)
1547 return -EINVAL;
1549 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1550 return -EINVAL;
1552 /* Small page NANDs do not support column change. */
1553 if (mtd->writesize <= 512)
1554 return -ENOTSUPP;
1556 if (chip->exec_op) {
1557 const struct nand_sdr_timings *sdr =
1558 nand_get_sdr_timings(&chip->data_interface);
1559 u8 addrs[2] = {};
1560 struct nand_op_instr instrs[] = {
1561 NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1562 NAND_OP_ADDR(2, addrs, 0),
1563 NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1564 PSEC_TO_NSEC(sdr->tCCS_min)),
1565 NAND_OP_DATA_IN(len, buf, 0),
1567 struct nand_operation op = NAND_OPERATION(instrs);
1568 int ret;
1570 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1571 if (ret < 0)
1572 return ret;
1574 /* Drop the DATA_IN instruction if len is set to 0. */
1575 if (!len)
1576 op.ninstrs--;
1578 instrs[3].ctx.data.force_8bit = force_8bit;
1580 return nand_exec_op(chip, &op);
1583 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
1584 if (len)
1585 chip->read_buf(mtd, buf, len);
1587 return 0;
1589 EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1592 * nand_read_oob_op - Do a READ OOB operation
1593 * @chip: The NAND chip
1594 * @page: page to read
1595 * @offset_in_oob: offset within the OOB area
1596 * @buf: buffer used to store the data
1597 * @len: length of the buffer
1599 * This function issues a READ OOB operation.
1600 * This function does not select/unselect the CS line.
1602 * Returns 0 on success, a negative error code otherwise.
1604 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1605 unsigned int offset_in_oob, void *buf, unsigned int len)
1607 struct mtd_info *mtd = nand_to_mtd(chip);
1609 if (len && !buf)
1610 return -EINVAL;
1612 if (offset_in_oob + len > mtd->oobsize)
1613 return -EINVAL;
1615 if (chip->exec_op)
1616 return nand_read_page_op(chip, page,
1617 mtd->writesize + offset_in_oob,
1618 buf, len);
1620 chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
1621 if (len)
1622 chip->read_buf(mtd, buf, len);
1624 return 0;
1626 EXPORT_SYMBOL_GPL(nand_read_oob_op);
1628 static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
1629 unsigned int offset_in_page, const void *buf,
1630 unsigned int len, bool prog)
1632 struct mtd_info *mtd = nand_to_mtd(chip);
1633 const struct nand_sdr_timings *sdr =
1634 nand_get_sdr_timings(&chip->data_interface);
1635 u8 addrs[5] = {};
1636 struct nand_op_instr instrs[] = {
1638 * The first instruction will be dropped if we're dealing
1639 * with a large page NAND and adjusted if we're dealing
1640 * with a small page NAND and the page offset is > 255.
1642 NAND_OP_CMD(NAND_CMD_READ0, 0),
1643 NAND_OP_CMD(NAND_CMD_SEQIN, 0),
1644 NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
1645 NAND_OP_DATA_OUT(len, buf, 0),
1646 NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
1647 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1649 struct nand_operation op = NAND_OPERATION(instrs);
1650 int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
1651 int ret;
1652 u8 status;
1654 if (naddrs < 0)
1655 return naddrs;
1657 addrs[naddrs++] = page;
1658 addrs[naddrs++] = page >> 8;
1659 if (chip->options & NAND_ROW_ADDR_3)
1660 addrs[naddrs++] = page >> 16;
1662 instrs[2].ctx.addr.naddrs = naddrs;
1664 /* Drop the last two instructions if we're not programming the page. */
1665 if (!prog) {
1666 op.ninstrs -= 2;
1667 /* Also drop the DATA_OUT instruction if empty. */
1668 if (!len)
1669 op.ninstrs--;
1672 if (mtd->writesize <= 512) {
1674 * Small pages need some more tweaking: we have to adjust the
1675 * first instruction depending on the page offset we're trying
1676 * to access.
1678 if (offset_in_page >= mtd->writesize)
1679 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1680 else if (offset_in_page >= 256 &&
1681 !(chip->options & NAND_BUSWIDTH_16))
1682 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1683 } else {
1685 * Drop the first command if we're dealing with a large page
1686 * NAND.
1688 op.instrs++;
1689 op.ninstrs--;
1692 ret = nand_exec_op(chip, &op);
1693 if (!prog || ret)
1694 return ret;
1696 ret = nand_status_op(chip, &status);
1697 if (ret)
1698 return ret;
1700 return status;
1704 * nand_prog_page_begin_op - starts a PROG PAGE operation
1705 * @chip: The NAND chip
1706 * @page: page to write
1707 * @offset_in_page: offset within the page
1708 * @buf: buffer containing the data to write to the page
1709 * @len: length of the buffer
1711 * This function issues the first half of a PROG PAGE operation.
1712 * This function does not select/unselect the CS line.
1714 * Returns 0 on success, a negative error code otherwise.
1716 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1717 unsigned int offset_in_page, const void *buf,
1718 unsigned int len)
1720 struct mtd_info *mtd = nand_to_mtd(chip);
1722 if (len && !buf)
1723 return -EINVAL;
1725 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1726 return -EINVAL;
1728 if (chip->exec_op)
1729 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1730 len, false);
1732 chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1734 if (buf)
1735 chip->write_buf(mtd, buf, len);
1737 return 0;
1739 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1742 * nand_prog_page_end_op - ends a PROG PAGE operation
1743 * @chip: The NAND chip
1745 * This function issues the second half of a PROG PAGE operation.
1746 * This function does not select/unselect the CS line.
1748 * Returns 0 on success, a negative error code otherwise.
1750 int nand_prog_page_end_op(struct nand_chip *chip)
1752 struct mtd_info *mtd = nand_to_mtd(chip);
1753 int ret;
1754 u8 status;
1756 if (chip->exec_op) {
1757 const struct nand_sdr_timings *sdr =
1758 nand_get_sdr_timings(&chip->data_interface);
1759 struct nand_op_instr instrs[] = {
1760 NAND_OP_CMD(NAND_CMD_PAGEPROG,
1761 PSEC_TO_NSEC(sdr->tWB_max)),
1762 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1764 struct nand_operation op = NAND_OPERATION(instrs);
1766 ret = nand_exec_op(chip, &op);
1767 if (ret)
1768 return ret;
1770 ret = nand_status_op(chip, &status);
1771 if (ret)
1772 return ret;
1773 } else {
1774 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1775 ret = chip->waitfunc(mtd, chip);
1776 if (ret < 0)
1777 return ret;
1779 status = ret;
1782 if (status & NAND_STATUS_FAIL)
1783 return -EIO;
1785 return 0;
1787 EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
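/*
 * Sketch of the split program flow enabled by the two helpers above: start
 * the page program with nand_prog_page_begin_op(), push additional data
 * (here the OOB area prepared in chip->oob_poi) with nand_write_data_op(),
 * then finish with nand_prog_page_end_op(). This mirrors what a raw
 * page-write path might do; the function itself is only an illustration.
 */
static int __maybe_unused example_prog_page_with_oob(struct nand_chip *chip,
                                                     unsigned int page,
                                                     const void *data)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;

        ret = nand_prog_page_begin_op(chip, page, 0, data, mtd->writesize);
        if (ret)
                return ret;

        /* Append the OOB bytes before issuing PAGEPROG. */
        ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
        if (ret)
                return ret;

        return nand_prog_page_end_op(chip);
}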
1790 * nand_prog_page_op - Do a full PROG PAGE operation
1791 * @chip: The NAND chip
1792 * @page: page to write
1793 * @offset_in_page: offset within the page
1794 * @buf: buffer containing the data to write to the page
1795 * @len: length of the buffer
1797 * This function issues a full PROG PAGE operation.
1798 * This function does not select/unselect the CS line.
1800 * Returns 0 on success, a negative error code otherwise.
1802 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1803 unsigned int offset_in_page, const void *buf,
1804 unsigned int len)
1806 struct mtd_info *mtd = nand_to_mtd(chip);
1807 int status;
1809 if (!len || !buf)
1810 return -EINVAL;
1812 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1813 return -EINVAL;
1815 if (chip->exec_op) {
1816 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1817 len, true);
1818 } else {
1819 chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1820 chip->write_buf(mtd, buf, len);
1821 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1822 status = chip->waitfunc(mtd, chip);
1825 if (status & NAND_STATUS_FAIL)
1826 return -EIO;
1828 return 0;
1830 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1833 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1834 * @chip: The NAND chip
1835 * @offset_in_page: offset within the page
1836 * @buf: buffer containing the data to send to the NAND
1837 * @len: length of the buffer
1838 * @force_8bit: force 8-bit bus access
1840 * This function issues a CHANGE WRITE COLUMN operation.
1841 * This function does not select/unselect the CS line.
1843 * Returns 0 on success, a negative error code otherwise.
1845 int nand_change_write_column_op(struct nand_chip *chip,
1846 unsigned int offset_in_page,
1847 const void *buf, unsigned int len,
1848 bool force_8bit)
1850 struct mtd_info *mtd = nand_to_mtd(chip);
1852 if (len && !buf)
1853 return -EINVAL;
1855 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1856 return -EINVAL;
1858 /* Small page NANDs do not support column change. */
1859 if (mtd->writesize <= 512)
1860 return -ENOTSUPP;
1862 if (chip->exec_op) {
1863 const struct nand_sdr_timings *sdr =
1864 nand_get_sdr_timings(&chip->data_interface);
1865 u8 addrs[2];
1866 struct nand_op_instr instrs[] = {
1867 NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1868 NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1869 NAND_OP_DATA_OUT(len, buf, 0),
1871 struct nand_operation op = NAND_OPERATION(instrs);
1872 int ret;
1874 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1875 if (ret < 0)
1876 return ret;
1878 instrs[2].ctx.data.force_8bit = force_8bit;
1880 /* Drop the DATA_OUT instruction if len is set to 0. */
1881 if (!len)
1882 op.ninstrs--;
1884 return nand_exec_op(chip, &op);
1887 chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
1888 if (len)
1889 chip->write_buf(mtd, buf, len);
1891 return 0;
1893 EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1896 * nand_readid_op - Do a READID operation
1897 * @chip: The NAND chip
1898 * @addr: address cycle to pass after the READID command
1899 * @buf: buffer used to store the ID
1900 * @len: length of the buffer
1902 * This function sends a READID command and reads back the ID returned by the
1903 * NAND.
1904 * This function does not select/unselect the CS line.
1906 * Returns 0 on success, a negative error code otherwise.
1908 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1909 unsigned int len)
1911 struct mtd_info *mtd = nand_to_mtd(chip);
1912 unsigned int i;
1913 u8 *id = buf;
1915 if (len && !buf)
1916 return -EINVAL;
1918 if (chip->exec_op) {
1919 const struct nand_sdr_timings *sdr =
1920 nand_get_sdr_timings(&chip->data_interface);
1921 struct nand_op_instr instrs[] = {
1922 NAND_OP_CMD(NAND_CMD_READID, 0),
1923 NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
1924 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1926 struct nand_operation op = NAND_OPERATION(instrs);
1928 /* Drop the DATA_IN instruction if len is set to 0. */
1929 if (!len)
1930 op.ninstrs--;
1932 return nand_exec_op(chip, &op);
1935 chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
1937 for (i = 0; i < len; i++)
1938 id[i] = chip->read_byte(mtd);
1940 return 0;
1942 EXPORT_SYMBOL_GPL(nand_readid_op);
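/*
 * Usage sketch for nand_readid_op() above: fetch the first two ID bytes
 * (manufacturer and device id) at ID address 0x00. Purely illustrative;
 * the function name is made up for the example.
 */
static int __maybe_unused example_read_id(struct nand_chip *chip,
                                          u8 *maf_id, u8 *dev_id)
{
        u8 id[2];
        int ret;

        ret = nand_readid_op(chip, 0, id, sizeof(id));
        if (ret)
                return ret;

        *maf_id = id[0];
        *dev_id = id[1];

        return 0;
}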
1945 * nand_status_op - Do a STATUS operation
1946 * @chip: The NAND chip
1947 * @status: out variable to store the NAND status
1949 * This function sends a STATUS command and reads back the status returned by
1950 * the NAND.
1951 * This function does not select/unselect the CS line.
1953 * Returns 0 on success, a negative error code otherwise.
1955 int nand_status_op(struct nand_chip *chip, u8 *status)
1957 struct mtd_info *mtd = nand_to_mtd(chip);
1959 if (chip->exec_op) {
1960 const struct nand_sdr_timings *sdr =
1961 nand_get_sdr_timings(&chip->data_interface);
1962 struct nand_op_instr instrs[] = {
1963 NAND_OP_CMD(NAND_CMD_STATUS,
1964 PSEC_TO_NSEC(sdr->tADL_min)),
1965 NAND_OP_8BIT_DATA_IN(1, status, 0),
1967 struct nand_operation op = NAND_OPERATION(instrs);
1969 if (!status)
1970 op.ninstrs--;
1972 return nand_exec_op(chip, &op);
1975 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1976 if (status)
1977 *status = chip->read_byte(mtd);
1979 return 0;
1981 EXPORT_SYMBOL_GPL(nand_status_op);
1984 * nand_exit_status_op - Exit a STATUS operation
1985 * @chip: The NAND chip
1987 * This function sends a READ0 command to cancel the effect of the STATUS
1988 * command to avoid reading only the status until a new read command is sent.
1990 * This function does not select/unselect the CS line.
1992 * Returns 0 on success, a negative error code otherwise.
1994 int nand_exit_status_op(struct nand_chip *chip)
1996 struct mtd_info *mtd = nand_to_mtd(chip);
1998 if (chip->exec_op) {
1999 struct nand_op_instr instrs[] = {
2000 NAND_OP_CMD(NAND_CMD_READ0, 0),
2002 struct nand_operation op = NAND_OPERATION(instrs);
2004 return nand_exec_op(chip, &op);
2007 chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
2009 return 0;
2011 EXPORT_SYMBOL_GPL(nand_exit_status_op);
2014 * nand_erase_op - Do an erase operation
2015 * @chip: The NAND chip
2016 * @eraseblock: block to erase
2018 * This function sends an ERASE command and waits for the NAND to be ready
2019 * before returning.
2020 * This function does not select/unselect the CS line.
2022 * Returns 0 on success, a negative error code otherwise.
2024 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
2026 struct mtd_info *mtd = nand_to_mtd(chip);
2027 unsigned int page = eraseblock <<
2028 (chip->phys_erase_shift - chip->page_shift);
2029 int ret;
2030 u8 status;
2032 if (chip->exec_op) {
2033 const struct nand_sdr_timings *sdr =
2034 nand_get_sdr_timings(&chip->data_interface);
2035 u8 addrs[3] = { page, page >> 8, page >> 16 };
2036 struct nand_op_instr instrs[] = {
2037 NAND_OP_CMD(NAND_CMD_ERASE1, 0),
2038 NAND_OP_ADDR(2, addrs, 0),
2039 NAND_OP_CMD(NAND_CMD_ERASE2,
2040 PSEC_TO_MSEC(sdr->tWB_max)),
2041 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
2043 struct nand_operation op = NAND_OPERATION(instrs);
2045 if (chip->options & NAND_ROW_ADDR_3)
2046 instrs[1].ctx.addr.naddrs++;
2048 ret = nand_exec_op(chip, &op);
2049 if (ret)
2050 return ret;
2052 ret = nand_status_op(chip, &status);
2053 if (ret)
2054 return ret;
2055 } else {
2056 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2057 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2059 ret = chip->waitfunc(mtd, chip);
2060 if (ret < 0)
2061 return ret;
2063 status = ret;
2066 if (status & NAND_STATUS_FAIL)
2067 return -EIO;
2069 return 0;
2071 EXPORT_SYMBOL_GPL(nand_erase_op);
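/*
 * Usage sketch for nand_erase_op() above: derive the eraseblock index from a
 * byte offset using phys_erase_shift and erase that single block. Error
 * handling is intentionally minimal and the helper name is hypothetical.
 */
static int __maybe_unused example_erase_at(struct nand_chip *chip, loff_t ofs)
{
        unsigned int eraseblock = ofs >> chip->phys_erase_shift;

        return nand_erase_op(chip, eraseblock);
}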
2074 * nand_set_features_op - Do a SET FEATURES operation
2075 * @chip: The NAND chip
2076 * @feature: feature id
2077 * @data: 4 bytes of data
2079 * This function sends a SET FEATURES command and waits for the NAND to be
2080 * ready before returning.
2081 * This function does not select/unselect the CS line.
2083 * Returns 0 on success, a negative error code otherwise.
2085 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
2086 const void *data)
2088 struct mtd_info *mtd = nand_to_mtd(chip);
2089 const u8 *params = data;
2090 int i, ret;
2091 u8 status;
2093 if (chip->exec_op) {
2094 const struct nand_sdr_timings *sdr =
2095 nand_get_sdr_timings(&chip->data_interface);
2096 struct nand_op_instr instrs[] = {
2097 NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
2098 NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
2099 NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
2100 PSEC_TO_NSEC(sdr->tWB_max)),
2101 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
2103 struct nand_operation op = NAND_OPERATION(instrs);
2105 ret = nand_exec_op(chip, &op);
2106 if (ret)
2107 return ret;
2109 ret = nand_status_op(chip, &status);
2110 if (ret)
2111 return ret;
2112 } else {
2113 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
2114 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
2115 chip->write_byte(mtd, params[i]);
2117 ret = chip->waitfunc(mtd, chip);
2118 if (ret < 0)
2119 return ret;
2121 status = ret;
2124 if (status & NAND_STATUS_FAIL)
2125 return -EIO;
2127 return 0;
2131 * nand_get_features_op - Do a GET FEATURES operation
2132 * @chip: The NAND chip
2133 * @feature: feature id
2134 * @data: 4 bytes of data
2136 * This function sends a GET FEATURES command and waits for the NAND to be
2137 * ready before returning.
2138 * This function does not select/unselect the CS line.
2140 * Returns 0 on success, a negative error code otherwise.
2142 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
2143 void *data)
2145 struct mtd_info *mtd = nand_to_mtd(chip);
2146 u8 *params = data;
2147 int i;
2149 if (chip->exec_op) {
2150 const struct nand_sdr_timings *sdr =
2151 nand_get_sdr_timings(&chip->data_interface);
2152 struct nand_op_instr instrs[] = {
2153 NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
2154 NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
2155 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
2156 PSEC_TO_NSEC(sdr->tRR_min)),
2157 NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
2158 data, 0),
2160 struct nand_operation op = NAND_OPERATION(instrs);
2162 return nand_exec_op(chip, &op);
2165 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
2166 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
2167 params[i] = chip->read_byte(mtd);
2169 return 0;
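/*
 * Illustrative sketch (not part of the original file): typical use of the
 * SET/GET FEATURES helpers to program an ONFI timing mode and read it back.
 * ONFI_FEATURE_ADDR_TIMING_MODE is assumed to be the relevant feature
 * address; the helper name is hypothetical.
 */
#if 0
static int example_set_timing_mode(struct nand_chip *chip, u8 mode)
{
	u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };
	int ret;

	ret = nand_set_features_op(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
	if (ret)
		return ret;

	/* Read the feature back to check that the NAND accepted the mode. */
	ret = nand_get_features_op(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
	if (ret)
		return ret;

	return param[0] == mode ? 0 : -EIO;
}
#endif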
2173 * nand_reset_op - Do a reset operation
2174 * @chip: The NAND chip
2176 * This function sends a RESET command and waits for the NAND to be ready
2177 * before returning.
2178 * This function does not select/unselect the CS line.
2180 * Returns 0 on success, a negative error code otherwise.
2182 int nand_reset_op(struct nand_chip *chip)
2184 struct mtd_info *mtd = nand_to_mtd(chip);
2186 if (chip->exec_op) {
2187 const struct nand_sdr_timings *sdr =
2188 nand_get_sdr_timings(&chip->data_interface);
2189 struct nand_op_instr instrs[] = {
2190 NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
2191 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
2193 struct nand_operation op = NAND_OPERATION(instrs);
2195 return nand_exec_op(chip, &op);
2198 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2200 return 0;
2202 EXPORT_SYMBOL_GPL(nand_reset_op);
2205 * nand_read_data_op - Read data from the NAND
2206 * @chip: The NAND chip
2207 * @buf: buffer used to store the data
2208 * @len: length of the buffer
2209 * @force_8bit: force 8-bit bus access
2211 * This function does a raw data read on the bus. Usually used after launching
2212 * another NAND operation like nand_read_page_op().
2213 * This function does not select/unselect the CS line.
2215 * Returns 0 on success, a negative error code otherwise.
2217 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
2218 bool force_8bit)
2220 struct mtd_info *mtd = nand_to_mtd(chip);
2222 if (!len || !buf)
2223 return -EINVAL;
2225 if (chip->exec_op) {
2226 struct nand_op_instr instrs[] = {
2227 NAND_OP_DATA_IN(len, buf, 0),
2229 struct nand_operation op = NAND_OPERATION(instrs);
2231 instrs[0].ctx.data.force_8bit = force_8bit;
2233 return nand_exec_op(chip, &op);
2236 if (force_8bit) {
2237 u8 *p = buf;
2238 unsigned int i;
2240 for (i = 0; i < len; i++)
2241 p[i] = chip->read_byte(mtd);
2242 } else {
2243 chip->read_buf(mtd, buf, len);
2246 return 0;
2248 EXPORT_SYMBOL_GPL(nand_read_data_op);
2251 * nand_write_data_op - Write data to the NAND
2252 * @chip: The NAND chip
2253 * @buf: buffer containing the data to send on the bus
2254 * @len: length of the buffer
2255 * @force_8bit: force 8-bit bus access
2257 * This function does a raw data write on the bus. Usually used after launching
2258 * another NAND operation like nand_prog_page_begin_op().
2259 * This function does not select/unselect the CS line.
2261 * Returns 0 on success, a negative error code otherwise.
2263 int nand_write_data_op(struct nand_chip *chip, const void *buf,
2264 unsigned int len, bool force_8bit)
2266 struct mtd_info *mtd = nand_to_mtd(chip);
2268 if (!len || !buf)
2269 return -EINVAL;
2271 if (chip->exec_op) {
2272 struct nand_op_instr instrs[] = {
2273 NAND_OP_DATA_OUT(len, buf, 0),
2275 struct nand_operation op = NAND_OPERATION(instrs);
2277 instrs[0].ctx.data.force_8bit = force_8bit;
2279 return nand_exec_op(chip, &op);
2282 if (force_8bit) {
2283 const u8 *p = buf;
2284 unsigned int i;
2286 for (i = 0; i < len; i++)
2287 chip->write_byte(mtd, p[i]);
2288 } else {
2289 chip->write_buf(mtd, buf, len);
2292 return 0;
2294 EXPORT_SYMBOL_GPL(nand_write_data_op);
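/*
 * Illustrative sketch (not part of the original file): reading the spare
 * area of a page by starting a READ at column mtd->writesize and then
 * pulling the OOB bytes off the bus with nand_read_data_op(). The function
 * name is hypothetical; nand_read_oob_op() offers the same service to real
 * callers.
 */
#if 0
static int example_read_spare_area(struct nand_chip *chip, unsigned int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
	if (ret)
		return ret;

	return nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
}
#endif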
2297 * struct nand_op_parser_ctx - Context used by the parser
2298 * @instrs: array of all the instructions that must be addressed
2299 * @ninstrs: length of the @instrs array
2300 * @subop: Sub-operation to be passed to the NAND controller
2302 * This structure is used by the core to split NAND operations into
2303 * sub-operations that can be handled by the NAND controller.
2305 struct nand_op_parser_ctx {
2306 const struct nand_op_instr *instrs;
2307 unsigned int ninstrs;
2308 struct nand_subop subop;
2312 * nand_op_parser_must_split_instr - Checks if an instruction must be split
2313 * @pat: the parser pattern element that matches @instr
2314 * @instr: pointer to the instruction to check
2315 * @start_offset: this is an in/out parameter. If @instr has already been
2316 * split, then @start_offset is the offset from which to start
2317 * (either an address cycle or an offset in the data buffer).
2318 * Conversely, if the function returns true (i.e. instr must be
2319 * split), this parameter is updated to point to the first
2320 * data/address cycle that has not been taken care of.
2322 * Some NAND controllers are limited and cannot send X address cycles in a
2323 * single operation, or cannot read/write more than Y bytes at a time.
2324 * In this case, split the instruction that does not fit in a single
2325 * controller-operation into two or more chunks.
2327 * Returns true if the instruction must be split, false otherwise.
2328 * The @start_offset parameter is also updated to the offset at which the next
2329 * bundle of instructions must start (if an address or a data instruction).
2331 static bool
2332 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
2333 const struct nand_op_instr *instr,
2334 unsigned int *start_offset)
2336 switch (pat->type) {
2337 case NAND_OP_ADDR_INSTR:
2338 if (!pat->ctx.addr.maxcycles)
2339 break;
2341 if (instr->ctx.addr.naddrs - *start_offset >
2342 pat->ctx.addr.maxcycles) {
2343 *start_offset += pat->ctx.addr.maxcycles;
2344 return true;
2346 break;
2348 case NAND_OP_DATA_IN_INSTR:
2349 case NAND_OP_DATA_OUT_INSTR:
2350 if (!pat->ctx.data.maxlen)
2351 break;
2353 if (instr->ctx.data.len - *start_offset >
2354 pat->ctx.data.maxlen) {
2355 *start_offset += pat->ctx.data.maxlen;
2356 return true;
2358 break;
2360 default:
2361 break;
2364 return false;
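/*
 * Example of the splitting above (not part of the original file): if a
 * pattern element declares ctx.data.maxlen = 1024 and the matched DATA_IN
 * instruction carries 2048 bytes, the first pass reports a split and moves
 * *start_offset to 1024, so the instruction is replayed in a second
 * sub-operation covering the remaining 1024 bytes.
 */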
2368 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2369 * remaining in the parser context
2370 * @pat: the pattern to test
2371 * @ctx: the parser context structure to match with the pattern @pat
2373 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2374 * Returns true if this is the case, false otherwise. When true is returned,
2375 * @ctx->subop is updated with the set of instructions to be passed to the
2376 * controller driver.
2378 static bool
2379 nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2380 struct nand_op_parser_ctx *ctx)
2382 unsigned int instr_offset = ctx->subop.first_instr_start_off;
2383 const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2384 const struct nand_op_instr *instr = ctx->subop.instrs;
2385 unsigned int i, ninstrs;
2387 for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2389 * The pattern instruction does not match the operation
2390 * instruction. If the instruction is marked optional in the
2391 * pattern definition, we skip the pattern element and continue
2392 * to the next one. If the element is mandatory, there's no
2393 * match and we can return false directly.
2395 if (instr->type != pat->elems[i].type) {
2396 if (!pat->elems[i].optional)
2397 return false;
2399 continue;
2403 * Now check the pattern element constraints. If the pattern is
2404 * not able to handle the whole instruction in a single step,
2405 * we have to split it.
2406 * The instr_offset value comes back updated to point to
2407 * the position where we have to split the instruction (the
2408 * start of the next subop chunk).
2410 if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2411 &instr_offset)) {
2412 ninstrs++;
2413 i++;
2414 break;
2417 instr++;
2418 ninstrs++;
2419 instr_offset = 0;
2423 * This can happen if all instructions of a pattern are optional.
2424 * Still, if there's not at least one instruction handled by this
2425 * pattern, this is not a match, and we should try the next one (if
2426 * any).
2428 if (!ninstrs)
2429 return false;
2432 * We had a match on the pattern head, but the pattern may be longer
2433 * than the instructions we're asked to execute. We need to make sure
2434 * there are no mandatory elements in the pattern tail.
2436 for (; i < pat->nelems; i++) {
2437 if (!pat->elems[i].optional)
2438 return false;
2442 * We have a match: update the subop structure accordingly and return
2443 * true.
2445 ctx->subop.ninstrs = ninstrs;
2446 ctx->subop.last_instr_end_off = instr_offset;
2448 return true;
2451 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
2452 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2454 const struct nand_op_instr *instr;
2455 char *prefix = " ";
2456 unsigned int i;
2458 pr_debug("executing subop:\n");
2460 for (i = 0; i < ctx->ninstrs; i++) {
2461 instr = &ctx->instrs[i];
2463 if (instr == &ctx->subop.instrs[0])
2464 prefix = " ->";
2466 switch (instr->type) {
2467 case NAND_OP_CMD_INSTR:
2468 pr_debug("%sCMD [0x%02x]\n", prefix,
2469 instr->ctx.cmd.opcode);
2470 break;
2471 case NAND_OP_ADDR_INSTR:
2472 pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
2473 instr->ctx.addr.naddrs,
2474 instr->ctx.addr.naddrs < 64 ?
2475 instr->ctx.addr.naddrs : 64,
2476 instr->ctx.addr.addrs);
2477 break;
2478 case NAND_OP_DATA_IN_INSTR:
2479 pr_debug("%sDATA_IN [%d B%s]\n", prefix,
2480 instr->ctx.data.len,
2481 instr->ctx.data.force_8bit ?
2482 ", force 8-bit" : "");
2483 break;
2484 case NAND_OP_DATA_OUT_INSTR:
2485 pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
2486 instr->ctx.data.len,
2487 instr->ctx.data.force_8bit ?
2488 ", force 8-bit" : "");
2489 break;
2490 case NAND_OP_WAITRDY_INSTR:
2491 pr_debug("%sWAITRDY [max %d ms]\n", prefix,
2492 instr->ctx.waitrdy.timeout_ms);
2493 break;
2496 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
2497 prefix = " ";
2500 #else
2501 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2503 /* NOP */
2505 #endif
2508 * nand_op_parser_exec_op - exec_op parser
2509 * @chip: the NAND chip
2510 * @parser: patterns description provided by the controller driver
2511 * @op: the NAND operation to address
2512 * @check_only: when true, the function only checks if @op can be handled but
2513 * does not execute the operation
2515 * Helper function designed to ease integration of NAND controller drivers that
2516 * only support a limited set of instruction sequences. The supported sequences
2517 * are described in @parser, and the framework takes care of splitting @op into
2518 * multiple sub-operations (if required) and passing them back to the ->exec()
2519 * callback of the matching pattern if @check_only is set to false.
2521 * NAND controller drivers should call this function from their own ->exec_op()
2522 * implementation.
2524 * Returns 0 on success, a negative error code otherwise. A failure can be
2525 * caused by an unsupported operation (none of the supported patterns is able
2526 * to handle the requested operation), or an error returned by the ->exec()
2527 * hook of the matching pattern.
2529 int nand_op_parser_exec_op(struct nand_chip *chip,
2530 const struct nand_op_parser *parser,
2531 const struct nand_operation *op, bool check_only)
2533 struct nand_op_parser_ctx ctx = {
2534 .subop.instrs = op->instrs,
2535 .instrs = op->instrs,
2536 .ninstrs = op->ninstrs,
2538 unsigned int i;
2540 while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2541 int ret;
2543 for (i = 0; i < parser->npatterns; i++) {
2544 const struct nand_op_parser_pattern *pattern;
2546 pattern = &parser->patterns[i];
2547 if (!nand_op_parser_match_pat(pattern, &ctx))
2548 continue;
2550 nand_op_parser_trace(&ctx);
2552 if (check_only)
2553 break;
2555 ret = pattern->exec(chip, &ctx.subop);
2556 if (ret)
2557 return ret;
2559 break;
2562 if (i == parser->npatterns) {
2563 pr_debug("->exec_op() parser: pattern not found!\n");
2564 return -ENOTSUPP;
2568 * Update the context structure by pointing to the start of the
2569 * next subop.
2571 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
2572 if (ctx.subop.last_instr_end_off)
2573 ctx.subop.instrs -= 1;
2575 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
2578 return 0;
2580 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
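/*
 * Illustrative sketch (not part of the original file): a controller limited
 * to 5 address cycles and 2048-byte transfers could describe its read path
 * with the parser macros from rawnand.h and let nand_op_parser_exec_op()
 * split operations for it. The NAND_OP_PARSER*() macro names are assumed to
 * match the rawnand.h definitions; driver and hook names are hypothetical.
 */
#if 0
static int example_exec_rw_subop(struct nand_chip *chip,
				 const struct nand_subop *subop)
{
	/* Program the (hypothetical) controller to run this sub-operation. */
	return 0;
}

static const struct nand_op_parser example_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		example_exec_rw_subop,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2048)));

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &example_op_parser, op,
				      check_only);
}
#endif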
2582 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2584 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2585 instr->type == NAND_OP_DATA_OUT_INSTR);
2588 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2589 unsigned int instr_idx)
2591 return subop && instr_idx < subop->ninstrs;
2594 static int nand_subop_get_start_off(const struct nand_subop *subop,
2595 unsigned int instr_idx)
2597 if (instr_idx)
2598 return 0;
2600 return subop->first_instr_start_off;
2604 * nand_subop_get_addr_start_off - Get the start offset in an address array
2605 * @subop: The entire sub-operation
2606 * @instr_idx: Index of the instruction inside the sub-operation
2608 * During driver development, one could be tempted to directly use the
2609 * ->addr.addrs field of address instructions. This is wrong as address
2610 * instructions might be split.
2612 * Given an address instruction, returns the offset of the first cycle to issue.
2614 int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2615 unsigned int instr_idx)
2617 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2618 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2619 return -EINVAL;
2621 return nand_subop_get_start_off(subop, instr_idx);
2623 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2626 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2627 * @subop: The entire sub-operation
2628 * @instr_idx: Index of the instruction inside the sub-operation
2630 * During driver development, one could be tempted to directly use the
2631 * ->addr->naddrs field of an address instruction. This is wrong as instructions
2632 * might be split.
2634 * Given an address instruction, returns the number of address cycles to issue.
2636 int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2637 unsigned int instr_idx)
2639 int start_off, end_off;
2641 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2642 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2643 return -EINVAL;
2645 start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2647 if (instr_idx == subop->ninstrs - 1 &&
2648 subop->last_instr_end_off)
2649 end_off = subop->last_instr_end_off;
2650 else
2651 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2653 return end_off - start_off;
2655 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2658 * nand_subop_get_data_start_off - Get the start offset in a data array
2659 * @subop: The entire sub-operation
2660 * @instr_idx: Index of the instruction inside the sub-operation
2662 * During driver development, one could be tempted to directly use the
2663 * ->data->buf.{in,out} field of data instructions. This is wrong as data
2664 * instructions might be split.
2666 * Given a data instruction, returns the offset to start from.
2668 int nand_subop_get_data_start_off(const struct nand_subop *subop,
2669 unsigned int instr_idx)
2671 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2672 !nand_instr_is_data(&subop->instrs[instr_idx]))
2673 return -EINVAL;
2675 return nand_subop_get_start_off(subop, instr_idx);
2677 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2680 * nand_subop_get_data_len - Get the number of bytes to retrieve
2681 * @subop: The entire sub-operation
2682 * @instr_idx: Index of the instruction inside the sub-operation
2684 * During driver development, one could be tempted to directly use the
2685 * ->data->len field of a data instruction. This is wrong as data instructions
2686 * might be split.
2688 * Returns the length of the chunk of data to send/receive.
2690 int nand_subop_get_data_len(const struct nand_subop *subop,
2691 unsigned int instr_idx)
2693 int start_off = 0, end_off;
2695 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2696 !nand_instr_is_data(&subop->instrs[instr_idx]))
2697 return -EINVAL;
2699 start_off = nand_subop_get_data_start_off(subop, instr_idx);
2701 if (instr_idx == subop->ninstrs - 1 &&
2702 subop->last_instr_end_off)
2703 end_off = subop->last_instr_end_off;
2704 else
2705 end_off = subop->instrs[instr_idx].ctx.data.len;
2707 return end_off - start_off;
2709 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
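/*
 * Illustrative sketch (not part of the original file): inside a pattern
 * ->exec() hook the accessors above must be used instead of the raw
 * instruction fields, because an instruction handed to the hook may only
 * cover part of the original address/data instruction. Names below are
 * hypothetical.
 */
#if 0
static int example_exec_subop(struct nand_chip *chip,
			      const struct nand_subop *subop)
{
	unsigned int i;

	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		switch (instr->type) {
		case NAND_OP_ADDR_INSTR: {
			int start = nand_subop_get_addr_start_off(subop, i);
			int ncycles = nand_subop_get_num_addr_cyc(subop, i);

			/* Issue instr->ctx.addr.addrs[start .. start + ncycles - 1] */
			break;
		}
		case NAND_OP_DATA_IN_INSTR: {
			int start = nand_subop_get_data_start_off(subop, i);
			int len = nand_subop_get_data_len(subop, i);

			/* Read len bytes into instr->ctx.data.buf.in + start */
			break;
		}
		default:
			break;
		}
	}

	return 0;
}
#endif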
2712 * nand_reset - Reset and initialize a NAND device
2713 * @chip: The NAND chip
2714 * @chipnr: Internal die id
2716 * Save the timings data structure, then apply SDR timings mode 0 (see
2717 * nand_reset_data_interface for details), do the reset operation, and
2718 * apply back the previous timings.
2720 * Returns 0 on success, a negative error code otherwise.
2722 int nand_reset(struct nand_chip *chip, int chipnr)
2724 struct mtd_info *mtd = nand_to_mtd(chip);
2725 struct nand_data_interface saved_data_intf = chip->data_interface;
2726 int ret;
2728 ret = nand_reset_data_interface(chip, chipnr);
2729 if (ret)
2730 return ret;
2733 * The CS line has to be released before we can apply the new NAND
2734 * interface settings, hence this weird ->select_chip() dance.
2736 chip->select_chip(mtd, chipnr);
2737 ret = nand_reset_op(chip);
2738 chip->select_chip(mtd, -1);
2739 if (ret)
2740 return ret;
2742 chip->select_chip(mtd, chipnr);
2743 chip->data_interface = saved_data_intf;
2744 ret = nand_setup_data_interface(chip, chipnr);
2745 chip->select_chip(mtd, -1);
2746 if (ret)
2747 return ret;
2749 return 0;
2751 EXPORT_SYMBOL_GPL(nand_reset);
2754 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2755 * @buf: buffer to test
2756 * @len: buffer length
2757 * @bitflips_threshold: maximum number of bitflips
2759 * Check if a buffer contains only 0xff, which means the underlying region
2760 * has been erased and is ready to be programmed.
2761 * The bitflips_threshold specifies the maximum number of bitflips before
2762 * considering the region as not erased.
2763 * Note: The logic of this function has been extracted from the memweight
2764 * implementation, except that nand_check_erased_buf exits before testing
2765 * the whole buffer if the number of bitflips exceeds the bitflips_threshold
2766 * value.
2768 * Returns a positive number of bitflips less than or equal to
2769 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2770 * threshold.
2772 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2774 const unsigned char *bitmap = buf;
2775 int bitflips = 0;
2776 int weight;
2778 for (; len && ((uintptr_t)bitmap) % sizeof(long);
2779 len--, bitmap++) {
2780 weight = hweight8(*bitmap);
2781 bitflips += BITS_PER_BYTE - weight;
2782 if (unlikely(bitflips > bitflips_threshold))
2783 return -EBADMSG;
2786 for (; len >= sizeof(long);
2787 len -= sizeof(long), bitmap += sizeof(long)) {
2788 unsigned long d = *((unsigned long *)bitmap);
2789 if (d == ~0UL)
2790 continue;
2791 weight = hweight_long(d);
2792 bitflips += BITS_PER_LONG - weight;
2793 if (unlikely(bitflips > bitflips_threshold))
2794 return -EBADMSG;
2797 for (; len > 0; len--, bitmap++) {
2798 weight = hweight8(*bitmap);
2799 bitflips += BITS_PER_BYTE - weight;
2800 if (unlikely(bitflips > bitflips_threshold))
2801 return -EBADMSG;
2804 return bitflips;
2808 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2809 * 0xff data
2810 * @data: data buffer to test
2811 * @datalen: data length
2812 * @ecc: ECC buffer
2813 * @ecclen: ECC length
2814 * @extraoob: extra OOB buffer
2815 * @extraooblen: extra OOB length
2816 * @bitflips_threshold: maximum number of bitflips
2818 * Check if a data buffer and its associated ECC and OOB data contains only
2819 * 0xff pattern, which means the underlying region has been erased and is
2820 * ready to be programmed.
2821 * The bitflips_threshold specifies the maximum number of bitflips before
2822 * considering the region as not erased.
2824 * Note:
2825 * 1/ ECC algorithms work on pre-defined block sizes which are usually
2826 * different from the NAND page size. When fixing bitflips, ECC engines will
2827 * report the number of errors per chunk, and the NAND core infrastructure
2828 * expects you to return the maximum number of bitflips for the whole page.
2829 * This is why you should always use this function on a single chunk and
2830 * not on the whole page. After checking each chunk you should update your
2831 * max_bitflips value accordingly.
2832 * 2/ When checking for bitflips in erased pages you should not only check
2833 * the payload data but also their associated ECC data, because a user might
2834 * have programmed almost all bits to 1 but left a few at 0. In this case, we
2835 * shouldn't consider the chunk as erased, and checking the ECC bytes prevents
2836 * this misclassification.
2837 * 3/ The extraoob argument is optional, and should be used if some of your OOB
2838 * data are protected by the ECC engine.
2839 * It could also be used if you support subpages and want to attach some
2840 * extra OOB data to an ECC chunk.
2842 * Returns a positive number of bitflips less than or equal to
2843 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2844 * threshold. In case of success, the passed buffers are filled with 0xff.
2846 int nand_check_erased_ecc_chunk(void *data, int datalen,
2847 void *ecc, int ecclen,
2848 void *extraoob, int extraooblen,
2849 int bitflips_threshold)
2851 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
2853 data_bitflips = nand_check_erased_buf(data, datalen,
2854 bitflips_threshold);
2855 if (data_bitflips < 0)
2856 return data_bitflips;
2858 bitflips_threshold -= data_bitflips;
2860 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
2861 if (ecc_bitflips < 0)
2862 return ecc_bitflips;
2864 bitflips_threshold -= ecc_bitflips;
2866 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
2867 bitflips_threshold);
2868 if (extraoob_bitflips < 0)
2869 return extraoob_bitflips;
2871 if (data_bitflips)
2872 memset(data, 0xff, datalen);
2874 if (ecc_bitflips)
2875 memset(ecc, 0xff, ecclen);
2877 if (extraoob_bitflips)
2878 memset(extraoob, 0xff, extraooblen);
2880 return data_bitflips + ecc_bitflips + extraoob_bitflips;
2882 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2885 * nand_read_page_raw - [INTERN] read raw page data without ecc
2886 * @mtd: mtd info structure
2887 * @chip: nand chip info structure
2888 * @buf: buffer to store read data
2889 * @oob_required: caller requires OOB data read to chip->oob_poi
2890 * @page: page number to read
2892 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2894 int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2895 uint8_t *buf, int oob_required, int page)
2897 int ret;
2899 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2900 if (ret)
2901 return ret;
2903 if (oob_required) {
2904 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2905 false);
2906 if (ret)
2907 return ret;
2910 return 0;
2912 EXPORT_SYMBOL(nand_read_page_raw);
2915 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2916 * @mtd: mtd info structure
2917 * @chip: nand chip info structure
2918 * @buf: buffer to store read data
2919 * @oob_required: caller requires OOB data read to chip->oob_poi
2920 * @page: page number to read
2922 * We need a special oob layout and handling even when OOB isn't used.
2924 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
2925 struct nand_chip *chip, uint8_t *buf,
2926 int oob_required, int page)
2928 int eccsize = chip->ecc.size;
2929 int eccbytes = chip->ecc.bytes;
2930 uint8_t *oob = chip->oob_poi;
2931 int steps, size, ret;
2933 ret = nand_read_page_op(chip, page, 0, NULL, 0);
2934 if (ret)
2935 return ret;
2937 for (steps = chip->ecc.steps; steps > 0; steps--) {
2938 ret = nand_read_data_op(chip, buf, eccsize, false);
2939 if (ret)
2940 return ret;
2942 buf += eccsize;
2944 if (chip->ecc.prepad) {
2945 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2946 false);
2947 if (ret)
2948 return ret;
2950 oob += chip->ecc.prepad;
2953 ret = nand_read_data_op(chip, oob, eccbytes, false);
2954 if (ret)
2955 return ret;
2957 oob += eccbytes;
2959 if (chip->ecc.postpad) {
2960 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2961 false);
2962 if (ret)
2963 return ret;
2965 oob += chip->ecc.postpad;
2969 size = mtd->oobsize - (oob - chip->oob_poi);
2970 if (size) {
2971 ret = nand_read_data_op(chip, oob, size, false);
2972 if (ret)
2973 return ret;
2976 return 0;
2980 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2981 * @mtd: mtd info structure
2982 * @chip: nand chip info structure
2983 * @buf: buffer to store read data
2984 * @oob_required: caller requires OOB data read to chip->oob_poi
2985 * @page: page number to read
2987 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2988 uint8_t *buf, int oob_required, int page)
2990 int i, eccsize = chip->ecc.size, ret;
2991 int eccbytes = chip->ecc.bytes;
2992 int eccsteps = chip->ecc.steps;
2993 uint8_t *p = buf;
2994 uint8_t *ecc_calc = chip->ecc.calc_buf;
2995 uint8_t *ecc_code = chip->ecc.code_buf;
2996 unsigned int max_bitflips = 0;
2998 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
3000 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3001 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
3003 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3004 chip->ecc.total);
3005 if (ret)
3006 return ret;
3008 eccsteps = chip->ecc.steps;
3009 p = buf;
3011 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3012 int stat;
3014 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
3015 if (stat < 0) {
3016 mtd->ecc_stats.failed++;
3017 } else {
3018 mtd->ecc_stats.corrected += stat;
3019 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3022 return max_bitflips;
3026 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
3027 * @mtd: mtd info structure
3028 * @chip: nand chip info structure
3029 * @data_offs: offset of requested data within the page
3030 * @readlen: data length
3031 * @bufpoi: buffer to store read data
3032 * @page: page number to read
3034 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
3035 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
3036 int page)
3038 int start_step, end_step, num_steps, ret;
3039 uint8_t *p;
3040 int data_col_addr, i, gaps = 0;
3041 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
3042 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
3043 int index, section = 0;
3044 unsigned int max_bitflips = 0;
3045 struct mtd_oob_region oobregion = { };
3047 /* Column address within the page aligned to ECC size (256 bytes) */
3048 start_step = data_offs / chip->ecc.size;
3049 end_step = (data_offs + readlen - 1) / chip->ecc.size;
3050 num_steps = end_step - start_step + 1;
3051 index = start_step * chip->ecc.bytes;
3053 /* Data size aligned to ECC ecc.size */
3054 datafrag_len = num_steps * chip->ecc.size;
3055 eccfrag_len = num_steps * chip->ecc.bytes;
3057 data_col_addr = start_step * chip->ecc.size;
3058 /* Handle a read that is not page aligned */
3059 p = bufpoi + data_col_addr;
3060 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
3061 if (ret)
3062 return ret;
3064 /* Calculate ECC */
3065 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
3066 chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);
3069 * Performance is better if we fetch the ECC bytes from their exact
3070 * positions in OOB, so make sure there are no gaps in the ECC layout.
3072 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
3073 if (ret)
3074 return ret;
3076 if (oobregion.length < eccfrag_len)
3077 gaps = 1;
3079 if (gaps) {
3080 ret = nand_change_read_column_op(chip, mtd->writesize,
3081 chip->oob_poi, mtd->oobsize,
3082 false);
3083 if (ret)
3084 return ret;
3085 } else {
3087 * Send the command to read the particular ECC bytes, taking care
3088 * of buswidth alignment in read_buf.
3090 aligned_pos = oobregion.offset & ~(busw - 1);
3091 aligned_len = eccfrag_len;
3092 if (oobregion.offset & (busw - 1))
3093 aligned_len++;
3094 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
3095 (busw - 1))
3096 aligned_len++;
3098 ret = nand_change_read_column_op(chip,
3099 mtd->writesize + aligned_pos,
3100 &chip->oob_poi[aligned_pos],
3101 aligned_len, false);
3102 if (ret)
3103 return ret;
3106 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
3107 chip->oob_poi, index, eccfrag_len);
3108 if (ret)
3109 return ret;
3111 p = bufpoi + data_col_addr;
3112 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
3113 int stat;
3115 stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
3116 &chip->ecc.calc_buf[i]);
3117 if (stat == -EBADMSG &&
3118 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3119 /* check for empty pages with bitflips */
3120 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3121 &chip->ecc.code_buf[i],
3122 chip->ecc.bytes,
3123 NULL, 0,
3124 chip->ecc.strength);
3127 if (stat < 0) {
3128 mtd->ecc_stats.failed++;
3129 } else {
3130 mtd->ecc_stats.corrected += stat;
3131 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3134 return max_bitflips;
3138 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
3139 * @mtd: mtd info structure
3140 * @chip: nand chip info structure
3141 * @buf: buffer to store read data
3142 * @oob_required: caller requires OOB data read to chip->oob_poi
3143 * @page: page number to read
3145 * Not for syndrome calculating ECC controllers which need a special oob layout.
3147 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
3148 uint8_t *buf, int oob_required, int page)
3150 int i, eccsize = chip->ecc.size, ret;
3151 int eccbytes = chip->ecc.bytes;
3152 int eccsteps = chip->ecc.steps;
3153 uint8_t *p = buf;
3154 uint8_t *ecc_calc = chip->ecc.calc_buf;
3155 uint8_t *ecc_code = chip->ecc.code_buf;
3156 unsigned int max_bitflips = 0;
3158 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3159 if (ret)
3160 return ret;
3162 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3163 chip->ecc.hwctl(mtd, NAND_ECC_READ);
3165 ret = nand_read_data_op(chip, p, eccsize, false);
3166 if (ret)
3167 return ret;
3169 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
3172 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3173 if (ret)
3174 return ret;
3176 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3177 chip->ecc.total);
3178 if (ret)
3179 return ret;
3181 eccsteps = chip->ecc.steps;
3182 p = buf;
3184 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3185 int stat;
3187 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
3188 if (stat == -EBADMSG &&
3189 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3190 /* check for empty pages with bitflips */
3191 stat = nand_check_erased_ecc_chunk(p, eccsize,
3192 &ecc_code[i], eccbytes,
3193 NULL, 0,
3194 chip->ecc.strength);
3197 if (stat < 0) {
3198 mtd->ecc_stats.failed++;
3199 } else {
3200 mtd->ecc_stats.corrected += stat;
3201 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3204 return max_bitflips;
3208 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
3209 * @mtd: mtd info structure
3210 * @chip: nand chip info structure
3211 * @buf: buffer to store read data
3212 * @oob_required: caller requires OOB data read to chip->oob_poi
3213 * @page: page number to read
3215 * Hardware ECC for large page chips, which requires the OOB to be read
3216 * first. For this ECC mode, the write_page method is re-used from ECC_HW.
3217 * These methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
3218 * support with multiple ECC steps, which follows the "infix ECC" scheme and
3219 * reads/writes ECC from the data area, overwriting the NAND manufacturer bad block markers.
3221 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
3222 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
3224 int i, eccsize = chip->ecc.size, ret;
3225 int eccbytes = chip->ecc.bytes;
3226 int eccsteps = chip->ecc.steps;
3227 uint8_t *p = buf;
3228 uint8_t *ecc_code = chip->ecc.code_buf;
3229 uint8_t *ecc_calc = chip->ecc.calc_buf;
3230 unsigned int max_bitflips = 0;
3232 /* Read the OOB area first */
3233 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3234 if (ret)
3235 return ret;
3237 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3238 if (ret)
3239 return ret;
3241 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3242 chip->ecc.total);
3243 if (ret)
3244 return ret;
3246 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3247 int stat;
3249 chip->ecc.hwctl(mtd, NAND_ECC_READ);
3251 ret = nand_read_data_op(chip, p, eccsize, false);
3252 if (ret)
3253 return ret;
3255 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
3257 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
3258 if (stat == -EBADMSG &&
3259 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3260 /* check for empty pages with bitflips */
3261 stat = nand_check_erased_ecc_chunk(p, eccsize,
3262 &ecc_code[i], eccbytes,
3263 NULL, 0,
3264 chip->ecc.strength);
3267 if (stat < 0) {
3268 mtd->ecc_stats.failed++;
3269 } else {
3270 mtd->ecc_stats.corrected += stat;
3271 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3274 return max_bitflips;
3278 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
3279 * @mtd: mtd info structure
3280 * @chip: nand chip info structure
3281 * @buf: buffer to store read data
3282 * @oob_required: caller requires OOB data read to chip->oob_poi
3283 * @page: page number to read
3285 * The hw generator calculates the error syndrome automatically. Therefore we
3286 * need a special oob layout and handling.
3288 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
3289 uint8_t *buf, int oob_required, int page)
3291 int ret, i, eccsize = chip->ecc.size;
3292 int eccbytes = chip->ecc.bytes;
3293 int eccsteps = chip->ecc.steps;
3294 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
3295 uint8_t *p = buf;
3296 uint8_t *oob = chip->oob_poi;
3297 unsigned int max_bitflips = 0;
3299 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3300 if (ret)
3301 return ret;
3303 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3304 int stat;
3306 chip->ecc.hwctl(mtd, NAND_ECC_READ);
3308 ret = nand_read_data_op(chip, p, eccsize, false);
3309 if (ret)
3310 return ret;
3312 if (chip->ecc.prepad) {
3313 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3314 false);
3315 if (ret)
3316 return ret;
3318 oob += chip->ecc.prepad;
3321 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
3323 ret = nand_read_data_op(chip, oob, eccbytes, false);
3324 if (ret)
3325 return ret;
3327 stat = chip->ecc.correct(mtd, p, oob, NULL);
3329 oob += eccbytes;
3331 if (chip->ecc.postpad) {
3332 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3333 false);
3334 if (ret)
3335 return ret;
3337 oob += chip->ecc.postpad;
3340 if (stat == -EBADMSG &&
3341 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3342 /* check for empty pages with bitflips */
3343 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3344 oob - eccpadbytes,
3345 eccpadbytes,
3346 NULL, 0,
3347 chip->ecc.strength);
3350 if (stat < 0) {
3351 mtd->ecc_stats.failed++;
3352 } else {
3353 mtd->ecc_stats.corrected += stat;
3354 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3358 /* Calculate remaining oob bytes */
3359 i = mtd->oobsize - (oob - chip->oob_poi);
3360 if (i) {
3361 ret = nand_read_data_op(chip, oob, i, false);
3362 if (ret)
3363 return ret;
3366 return max_bitflips;
3370 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3371 * @mtd: mtd info structure
3372 * @oob: oob destination address
3373 * @ops: oob ops structure
3374 * @len: size of oob to transfer
3376 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
3377 struct mtd_oob_ops *ops, size_t len)
3379 struct nand_chip *chip = mtd_to_nand(mtd);
3380 int ret;
3382 switch (ops->mode) {
3384 case MTD_OPS_PLACE_OOB:
3385 case MTD_OPS_RAW:
3386 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3387 return oob + len;
3389 case MTD_OPS_AUTO_OOB:
3390 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3391 ops->ooboffs, len);
3392 BUG_ON(ret);
3393 return oob + len;
3395 default:
3396 BUG();
3398 return NULL;
3402 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3403 * @mtd: MTD device structure
3404 * @retry_mode: the retry mode to use
3406 * Some vendors supply a special command to shift the Vt threshold, to be used
3407 * when there are too many bitflips in a page (i.e., ECC error). After setting
3408 * a new threshold, the host should retry reading the page.
3410 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
3412 struct nand_chip *chip = mtd_to_nand(mtd);
3414 pr_debug("setting READ RETRY mode %d\n", retry_mode);
3416 if (retry_mode >= chip->read_retries)
3417 return -EINVAL;
3419 if (!chip->setup_read_retry)
3420 return -EOPNOTSUPP;
3422 return chip->setup_read_retry(mtd, retry_mode);
3426 * nand_do_read_ops - [INTERN] Read data with ECC
3427 * @mtd: MTD device structure
3428 * @from: offset to read from
3429 * @ops: oob ops structure
3431 * Internal function. Called with chip held.
3433 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
3434 struct mtd_oob_ops *ops)
3436 int chipnr, page, realpage, col, bytes, aligned, oob_required;
3437 struct nand_chip *chip = mtd_to_nand(mtd);
3438 int ret = 0;
3439 uint32_t readlen = ops->len;
3440 uint32_t oobreadlen = ops->ooblen;
3441 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3443 uint8_t *bufpoi, *oob, *buf;
3444 int use_bufpoi;
3445 unsigned int max_bitflips = 0;
3446 int retry_mode = 0;
3447 bool ecc_fail = false;
3449 chipnr = (int)(from >> chip->chip_shift);
3450 chip->select_chip(mtd, chipnr);
3452 realpage = (int)(from >> chip->page_shift);
3453 page = realpage & chip->pagemask;
3455 col = (int)(from & (mtd->writesize - 1));
3457 buf = ops->datbuf;
3458 oob = ops->oobbuf;
3459 oob_required = oob ? 1 : 0;
3461 while (1) {
3462 unsigned int ecc_failures = mtd->ecc_stats.failed;
3464 bytes = min(mtd->writesize - col, readlen);
3465 aligned = (bytes == mtd->writesize);
3467 if (!aligned)
3468 use_bufpoi = 1;
3469 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
3470 use_bufpoi = !virt_addr_valid(buf) ||
3471 !IS_ALIGNED((unsigned long)buf,
3472 chip->buf_align);
3473 else
3474 use_bufpoi = 0;
3476 /* Is the current page in the buffer? */
3477 if (realpage != chip->pagebuf || oob) {
3478 bufpoi = use_bufpoi ? chip->data_buf : buf;
3480 if (use_bufpoi && aligned)
3481 pr_debug("%s: using read bounce buffer for buf@%p\n",
3482 __func__, buf);
3484 read_retry:
3486 * Now read the page into the buffer. Absent an error,
3487 * the read methods return max bitflips per ecc step.
3489 if (unlikely(ops->mode == MTD_OPS_RAW))
3490 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
3491 oob_required,
3492 page);
3493 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3494 !oob)
3495 ret = chip->ecc.read_subpage(mtd, chip,
3496 col, bytes, bufpoi,
3497 page);
3498 else
3499 ret = chip->ecc.read_page(mtd, chip, bufpoi,
3500 oob_required, page);
3501 if (ret < 0) {
3502 if (use_bufpoi)
3503 /* Invalidate page cache */
3504 chip->pagebuf = -1;
3505 break;
3508 /* Transfer not aligned data */
3509 if (use_bufpoi) {
3510 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3511 !(mtd->ecc_stats.failed - ecc_failures) &&
3512 (ops->mode != MTD_OPS_RAW)) {
3513 chip->pagebuf = realpage;
3514 chip->pagebuf_bitflips = ret;
3515 } else {
3516 /* Invalidate page cache */
3517 chip->pagebuf = -1;
3519 memcpy(buf, chip->data_buf + col, bytes);
3522 if (unlikely(oob)) {
3523 int toread = min(oobreadlen, max_oobsize);
3525 if (toread) {
3526 oob = nand_transfer_oob(mtd,
3527 oob, ops, toread);
3528 oobreadlen -= toread;
3532 if (chip->options & NAND_NEED_READRDY) {
3533 /* Apply delay or wait for ready/busy pin */
3534 if (!chip->dev_ready)
3535 udelay(chip->chip_delay);
3536 else
3537 nand_wait_ready(mtd);
3540 if (mtd->ecc_stats.failed - ecc_failures) {
3541 if (retry_mode + 1 < chip->read_retries) {
3542 retry_mode++;
3543 ret = nand_setup_read_retry(mtd,
3544 retry_mode);
3545 if (ret < 0)
3546 break;
3548 /* Reset failures; retry */
3549 mtd->ecc_stats.failed = ecc_failures;
3550 goto read_retry;
3551 } else {
3552 /* No more retry modes; real failure */
3553 ecc_fail = true;
3557 buf += bytes;
3558 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3559 } else {
3560 memcpy(buf, chip->data_buf + col, bytes);
3561 buf += bytes;
3562 max_bitflips = max_t(unsigned int, max_bitflips,
3563 chip->pagebuf_bitflips);
3566 readlen -= bytes;
3568 /* Reset to retry mode 0 */
3569 if (retry_mode) {
3570 ret = nand_setup_read_retry(mtd, 0);
3571 if (ret < 0)
3572 break;
3573 retry_mode = 0;
3576 if (!readlen)
3577 break;
3579 /* For subsequent reads align to page boundary */
3580 col = 0;
3581 /* Increment page address */
3582 realpage++;
3584 page = realpage & chip->pagemask;
3585 /* Check if we cross a chip boundary */
3586 if (!page) {
3587 chipnr++;
3588 chip->select_chip(mtd, -1);
3589 chip->select_chip(mtd, chipnr);
3592 chip->select_chip(mtd, -1);
3594 ops->retlen = ops->len - (size_t) readlen;
3595 if (oob)
3596 ops->oobretlen = ops->ooblen - oobreadlen;
3598 if (ret < 0)
3599 return ret;
3601 if (ecc_fail)
3602 return -EBADMSG;
3604 return max_bitflips;
3608 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3609 * @mtd: mtd info structure
3610 * @chip: nand chip info structure
3611 * @page: page number to read
3613 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
3615 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3617 EXPORT_SYMBOL(nand_read_oob_std);
3620 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3621 * with syndromes
3622 * @mtd: mtd info structure
3623 * @chip: nand chip info structure
3624 * @page: page number to read
3626 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
3627 int page)
3629 int length = mtd->oobsize;
3630 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3631 int eccsize = chip->ecc.size;
3632 uint8_t *bufpoi = chip->oob_poi;
3633 int i, toread, sndrnd = 0, pos, ret;
3635 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3636 if (ret)
3637 return ret;
3639 for (i = 0; i < chip->ecc.steps; i++) {
3640 if (sndrnd) {
3641 int ret;
3643 pos = eccsize + i * (eccsize + chunk);
3644 if (mtd->writesize > 512)
3645 ret = nand_change_read_column_op(chip, pos,
3646 NULL, 0,
3647 false);
3648 else
3649 ret = nand_read_page_op(chip, page, pos, NULL,
3652 if (ret)
3653 return ret;
3654 } else
3655 sndrnd = 1;
3656 toread = min_t(int, length, chunk);
3658 ret = nand_read_data_op(chip, bufpoi, toread, false);
3659 if (ret)
3660 return ret;
3662 bufpoi += toread;
3663 length -= toread;
3665 if (length > 0) {
3666 ret = nand_read_data_op(chip, bufpoi, length, false);
3667 if (ret)
3668 return ret;
3671 return 0;
3673 EXPORT_SYMBOL(nand_read_oob_syndrome);
3676 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3677 * @mtd: mtd info structure
3678 * @chip: nand chip info structure
3679 * @page: page number to write
3681 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
3683 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3684 mtd->oobsize);
3686 EXPORT_SYMBOL(nand_write_oob_std);
3689 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3690 * with syndrome - only for large page flash
3691 * @mtd: mtd info structure
3692 * @chip: nand chip info structure
3693 * @page: page number to write
3695 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
3696 int page)
3698 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3699 int eccsize = chip->ecc.size, length = mtd->oobsize;
3700 int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3701 const uint8_t *bufpoi = chip->oob_poi;
3704 * data-ecc-data-ecc ... ecc-oob
3705 * or
3706 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3708 if (!chip->ecc.prepad && !chip->ecc.postpad) {
3709 pos = steps * (eccsize + chunk);
3710 steps = 0;
3711 } else
3712 pos = eccsize;
3714 ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3715 if (ret)
3716 return ret;
3718 for (i = 0; i < steps; i++) {
3719 if (sndcmd) {
3720 if (mtd->writesize <= 512) {
3721 uint32_t fill = 0xFFFFFFFF;
3723 len = eccsize;
3724 while (len > 0) {
3725 int num = min_t(int, len, 4);
3727 ret = nand_write_data_op(chip, &fill,
3728 num, false);
3729 if (ret)
3730 return ret;
3732 len -= num;
3734 } else {
3735 pos = eccsize + i * (eccsize + chunk);
3736 ret = nand_change_write_column_op(chip, pos,
3737 NULL, 0,
3738 false);
3739 if (ret)
3740 return ret;
3742 } else
3743 sndcmd = 1;
3744 len = min_t(int, length, chunk);
3746 ret = nand_write_data_op(chip, bufpoi, len, false);
3747 if (ret)
3748 return ret;
3750 bufpoi += len;
3751 length -= len;
3753 if (length > 0) {
3754 ret = nand_write_data_op(chip, bufpoi, length, false);
3755 if (ret)
3756 return ret;
3759 return nand_prog_page_end_op(chip);
3761 EXPORT_SYMBOL(nand_write_oob_syndrome);
3764 * nand_do_read_oob - [INTERN] NAND read out-of-band
3765 * @mtd: MTD device structure
3766 * @from: offset to read from
3767 * @ops: oob operations description structure
3769 * NAND read out-of-band data from the spare area.
3771 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
3772 struct mtd_oob_ops *ops)
3774 unsigned int max_bitflips = 0;
3775 int page, realpage, chipnr;
3776 struct nand_chip *chip = mtd_to_nand(mtd);
3777 struct mtd_ecc_stats stats;
3778 int readlen = ops->ooblen;
3779 int len;
3780 uint8_t *buf = ops->oobbuf;
3781 int ret = 0;
3783 pr_debug("%s: from = 0x%08Lx, len = %i\n",
3784 __func__, (unsigned long long)from, readlen);
3786 stats = mtd->ecc_stats;
3788 len = mtd_oobavail(mtd, ops);
3790 chipnr = (int)(from >> chip->chip_shift);
3791 chip->select_chip(mtd, chipnr);
3793 /* Shift to get page */
3794 realpage = (int)(from >> chip->page_shift);
3795 page = realpage & chip->pagemask;
3797 while (1) {
3798 if (ops->mode == MTD_OPS_RAW)
3799 ret = chip->ecc.read_oob_raw(mtd, chip, page);
3800 else
3801 ret = chip->ecc.read_oob(mtd, chip, page);
3803 if (ret < 0)
3804 break;
3806 len = min(len, readlen);
3807 buf = nand_transfer_oob(mtd, buf, ops, len);
3809 if (chip->options & NAND_NEED_READRDY) {
3810 /* Apply delay or wait for ready/busy pin */
3811 if (!chip->dev_ready)
3812 udelay(chip->chip_delay);
3813 else
3814 nand_wait_ready(mtd);
3817 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3819 readlen -= len;
3820 if (!readlen)
3821 break;
3823 /* Increment page address */
3824 realpage++;
3826 page = realpage & chip->pagemask;
3827 /* Check if we cross a chip boundary */
3828 if (!page) {
3829 chipnr++;
3830 chip->select_chip(mtd, -1);
3831 chip->select_chip(mtd, chipnr);
3834 chip->select_chip(mtd, -1);
3836 ops->oobretlen = ops->ooblen - readlen;
3838 if (ret < 0)
3839 return ret;
3841 if (mtd->ecc_stats.failed - stats.failed)
3842 return -EBADMSG;
3844 return max_bitflips;
3848 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3849 * @mtd: MTD device structure
3850 * @from: offset to read from
3851 * @ops: oob operation description structure
3853 * NAND read data and/or out-of-band data.
3855 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3856 struct mtd_oob_ops *ops)
3858 int ret;
3860 ops->retlen = 0;
3862 if (ops->mode != MTD_OPS_PLACE_OOB &&
3863 ops->mode != MTD_OPS_AUTO_OOB &&
3864 ops->mode != MTD_OPS_RAW)
3865 return -ENOTSUPP;
3867 nand_get_device(mtd, FL_READING);
3869 if (!ops->datbuf)
3870 ret = nand_do_read_oob(mtd, from, ops);
3871 else
3872 ret = nand_do_read_ops(mtd, from, ops);
3874 nand_release_device(mtd);
3875 return ret;
3880 * nand_write_page_raw - [INTERN] raw page write function
3881 * @mtd: mtd info structure
3882 * @chip: nand chip info structure
3883 * @buf: data buffer
3884 * @oob_required: must write chip->oob_poi to OOB
3885 * @page: page number to write
3887 * Not for syndrome calculating ECC controllers, which use a special oob layout.
3889 int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
3890 const uint8_t *buf, int oob_required, int page)
3892 int ret;
3894 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3895 if (ret)
3896 return ret;
3898 if (oob_required) {
3899 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3900 false);
3901 if (ret)
3902 return ret;
3905 return nand_prog_page_end_op(chip);
3907 EXPORT_SYMBOL(nand_write_page_raw);
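/*
 * Illustrative sketch (not part of the original file): controller drivers
 * without their own raw/OOB accessors typically point the ecc hooks at the
 * exported helpers from this file when they initialise the chip. The
 * function name is hypothetical.
 */
#if 0
static void example_use_default_helpers(struct nand_chip *chip)
{
	chip->ecc.read_page_raw = nand_read_page_raw;
	chip->ecc.write_page_raw = nand_write_page_raw;
	chip->ecc.read_oob = nand_read_oob_std;
	chip->ecc.write_oob = nand_write_oob_std;
}
#endif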
3910 * nand_write_page_raw_syndrome - [INTERN] raw page write function
3911 * @mtd: mtd info structure
3912 * @chip: nand chip info structure
3913 * @buf: data buffer
3914 * @oob_required: must write chip->oob_poi to OOB
3915 * @page: page number to write
3917 * We need a special oob layout and handling even when ECC isn't checked.
3919 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
3920 struct nand_chip *chip,
3921 const uint8_t *buf, int oob_required,
3922 int page)
3924 int eccsize = chip->ecc.size;
3925 int eccbytes = chip->ecc.bytes;
3926 uint8_t *oob = chip->oob_poi;
3927 int steps, size, ret;
3929 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3930 if (ret)
3931 return ret;
3933 for (steps = chip->ecc.steps; steps > 0; steps--) {
3934 ret = nand_write_data_op(chip, buf, eccsize, false);
3935 if (ret)
3936 return ret;
3938 buf += eccsize;
3940 if (chip->ecc.prepad) {
3941 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3942 false);
3943 if (ret)
3944 return ret;
3946 oob += chip->ecc.prepad;
3949 ret = nand_write_data_op(chip, oob, eccbytes, false);
3950 if (ret)
3951 return ret;
3953 oob += eccbytes;
3955 if (chip->ecc.postpad) {
3956 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3957 false);
3958 if (ret)
3959 return ret;
3961 oob += chip->ecc.postpad;
3965 size = mtd->oobsize - (oob - chip->oob_poi);
3966 if (size) {
3967 ret = nand_write_data_op(chip, oob, size, false);
3968 if (ret)
3969 return ret;
3972 return nand_prog_page_end_op(chip);
3975 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3976 * @mtd: mtd info structure
3977 * @chip: nand chip info structure
3978 * @buf: data buffer
3979 * @oob_required: must write chip->oob_poi to OOB
3980 * @page: page number to write
3982 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
3983 const uint8_t *buf, int oob_required,
3984 int page)
3986 int i, eccsize = chip->ecc.size, ret;
3987 int eccbytes = chip->ecc.bytes;
3988 int eccsteps = chip->ecc.steps;
3989 uint8_t *ecc_calc = chip->ecc.calc_buf;
3990 const uint8_t *p = buf;
3992 /* Software ECC calculation */
3993 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3994 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
3996 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3997 chip->ecc.total);
3998 if (ret)
3999 return ret;
4001 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
4005 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
4006 * @mtd: mtd info structure
4007 * @chip: nand chip info structure
4008 * @buf: data buffer
4009 * @oob_required: must write chip->oob_poi to OOB
4010 * @page: page number to write
4012 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
4013 const uint8_t *buf, int oob_required,
4014 int page)
4016 int i, eccsize = chip->ecc.size, ret;
4017 int eccbytes = chip->ecc.bytes;
4018 int eccsteps = chip->ecc.steps;
4019 uint8_t *ecc_calc = chip->ecc.calc_buf;
4020 const uint8_t *p = buf;
4022 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4023 if (ret)
4024 return ret;
4026 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4027 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
4029 ret = nand_write_data_op(chip, p, eccsize, false);
4030 if (ret)
4031 return ret;
4033 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
4036 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4037 chip->ecc.total);
4038 if (ret)
4039 return ret;
4041 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4042 if (ret)
4043 return ret;
4045 return nand_prog_page_end_op(chip);
4050 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
4051 * @mtd: mtd info structure
4052 * @chip: nand chip info structure
4053 * @offset: column address of subpage within the page
4054 * @data_len: data length
4055 * @buf: data buffer
4056 * @oob_required: must write chip->oob_poi to OOB
4057 * @page: page number to write
4059 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
4060 struct nand_chip *chip, uint32_t offset,
4061 uint32_t data_len, const uint8_t *buf,
4062 int oob_required, int page)
4064 uint8_t *oob_buf = chip->oob_poi;
4065 uint8_t *ecc_calc = chip->ecc.calc_buf;
4066 int ecc_size = chip->ecc.size;
4067 int ecc_bytes = chip->ecc.bytes;
4068 int ecc_steps = chip->ecc.steps;
4069 uint32_t start_step = offset / ecc_size;
4070 uint32_t end_step = (offset + data_len - 1) / ecc_size;
4071 int oob_bytes = mtd->oobsize / ecc_steps;
4072 int step, ret;
4074 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4075 if (ret)
4076 return ret;
4078 for (step = 0; step < ecc_steps; step++) {
4079 /* configure controller for WRITE access */
4080 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
4082 /* write data (untouched subpages already masked by 0xFF) */
4083 ret = nand_write_data_op(chip, buf, ecc_size, false);
4084 if (ret)
4085 return ret;
4087 /* mask ECC of un-touched subpages by padding 0xFF */
4088 if ((step < start_step) || (step > end_step))
4089 memset(ecc_calc, 0xff, ecc_bytes);
4090 else
4091 chip->ecc.calculate(mtd, buf, ecc_calc);
4093 /* mask OOB of un-touched subpages by padding 0xFF */
4094 /* if oob_required, preserve OOB metadata of written subpage */
4095 if (!oob_required || (step < start_step) || (step > end_step))
4096 memset(oob_buf, 0xff, oob_bytes);
4098 buf += ecc_size;
4099 ecc_calc += ecc_bytes;
4100 oob_buf += oob_bytes;
4103 /* copy calculated ECC for the whole page to chip->oob_poi */
4104 /* this includes the masked value (0xFF) for unwritten subpages */
4105 ecc_calc = chip->ecc.calc_buf;
4106 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4107 chip->ecc.total);
4108 if (ret)
4109 return ret;
4111 /* write OOB buffer to NAND device */
4112 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4113 if (ret)
4114 return ret;
4116 return nand_prog_page_end_op(chip);
4121 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
4122 * @mtd: mtd info structure
4123 * @chip: nand chip info structure
4124 * @buf: data buffer
4125 * @oob_required: must write chip->oob_poi to OOB
4126 * @page: page number to write
4128 * The hw generator calculates the error syndrome automatically. Therefore we
4129 * need a special oob layout and handling.
4131 static int nand_write_page_syndrome(struct mtd_info *mtd,
4132 struct nand_chip *chip,
4133 const uint8_t *buf, int oob_required,
4134 int page)
4136 int i, eccsize = chip->ecc.size;
4137 int eccbytes = chip->ecc.bytes;
4138 int eccsteps = chip->ecc.steps;
4139 const uint8_t *p = buf;
4140 uint8_t *oob = chip->oob_poi;
4141 int ret;
4143 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4144 if (ret)
4145 return ret;
4147 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4148 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
4150 ret = nand_write_data_op(chip, p, eccsize, false);
4151 if (ret)
4152 return ret;
4154 if (chip->ecc.prepad) {
4155 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
4156 false);
4157 if (ret)
4158 return ret;
4160 oob += chip->ecc.prepad;
4163 chip->ecc.calculate(mtd, p, oob);
4165 ret = nand_write_data_op(chip, oob, eccbytes, false);
4166 if (ret)
4167 return ret;
4169 oob += eccbytes;
4171 if (chip->ecc.postpad) {
4172 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
4173 false);
4174 if (ret)
4175 return ret;
4177 oob += chip->ecc.postpad;
4181 /* Calculate remaining oob bytes */
4182 i = mtd->oobsize - (oob - chip->oob_poi);
4183 if (i) {
4184 ret = nand_write_data_op(chip, oob, i, false);
4185 if (ret)
4186 return ret;
4189 return nand_prog_page_end_op(chip);
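/*
 * With the syndrome layout the data and OOB bytes are interleaved on the
 * bus, one ECC step at a time:
 *
 *   | data (ecc.size) | prepad | ECC (ecc.bytes) | postpad |
 *
 * repeated ecc.steps times, followed by whatever OOB bytes remain. This
 * sketch only restates what the loop above does.
 */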
4193 * nand_write_page - write one page
4194 * @mtd: MTD device structure
4195 * @chip: NAND chip descriptor
4196 * @offset: address offset within the page
4197 * @data_len: length of actual data to be written
4198 * @buf: the data to write
4199 * @oob_required: must write chip->oob_poi to OOB
4200 * @page: page number to write
4201 * @raw: use _raw version of write_page
4203 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
4204 uint32_t offset, int data_len, const uint8_t *buf,
4205 int oob_required, int page, int raw)
4207 int status, subpage;
4209 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
4210 chip->ecc.write_subpage)
4211 subpage = offset || (data_len < mtd->writesize);
4212 else
4213 subpage = 0;
4215 if (unlikely(raw))
4216 status = chip->ecc.write_page_raw(mtd, chip, buf,
4217 oob_required, page);
4218 else if (subpage)
4219 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
4220 buf, oob_required, page);
4221 else
4222 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
4223 page);
4225 if (status < 0)
4226 return status;
4228 return 0;
4232 * nand_fill_oob - [INTERN] Transfer client buffer to oob
4233 * @mtd: MTD device structure
4234 * @oob: oob data buffer
4235 * @len: oob data write length
4236 * @ops: oob ops structure
4238 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
4239 struct mtd_oob_ops *ops)
4241 struct nand_chip *chip = mtd_to_nand(mtd);
4242 int ret;
4245 * Initialise to all 0xFF, to avoid the possibility of left over OOB
4246 * data from a previous OOB read.
4248 memset(chip->oob_poi, 0xff, mtd->oobsize);
4250 switch (ops->mode) {
4252 case MTD_OPS_PLACE_OOB:
4253 case MTD_OPS_RAW:
4254 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
4255 return oob + len;
4257 case MTD_OPS_AUTO_OOB:
4258 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
4259 ops->ooboffs, len);
4260 BUG_ON(ret);
4261 return oob + len;
4263 default:
4264 BUG();
4266 return NULL;
4269 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
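/*
 * Example: with chip->subpagesize = 512 (0x200), NOTALIGNED(0x200) is 0
 * while NOTALIGNED(0x300) is non-zero, so nand_do_write_ops() below
 * rejects the latter offset/length.
 */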
4272 * nand_do_write_ops - [INTERN] NAND write with ECC
4273 * @mtd: MTD device structure
4274 * @to: offset to write to
4275 * @ops: oob operations description structure
4277 * NAND write with ECC.
4279 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
4280 struct mtd_oob_ops *ops)
4282 int chipnr, realpage, page, column;
4283 struct nand_chip *chip = mtd_to_nand(mtd);
4284 uint32_t writelen = ops->len;
4286 uint32_t oobwritelen = ops->ooblen;
4287 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
4289 uint8_t *oob = ops->oobbuf;
4290 uint8_t *buf = ops->datbuf;
4291 int ret;
4292 int oob_required = oob ? 1 : 0;
4294 ops->retlen = 0;
4295 if (!writelen)
4296 return 0;
4298 /* Reject writes which are not page aligned */
4299 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
4300 pr_notice("%s: attempt to write non page aligned data\n",
4301 __func__);
4302 return -EINVAL;
4305 column = to & (mtd->writesize - 1);
4307 chipnr = (int)(to >> chip->chip_shift);
4308 chip->select_chip(mtd, chipnr);
4310 /* Check, if it is write protected */
4311 if (nand_check_wp(mtd)) {
4312 ret = -EIO;
4313 goto err_out;
4316 realpage = (int)(to >> chip->page_shift);
4317 page = realpage & chip->pagemask;
4319 /* Invalidate the page cache, when we write to the cached page */
4320 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
4321 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
4322 chip->pagebuf = -1;
4324 /* Don't allow multipage oob writes with offset */
4325 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
4326 ret = -EINVAL;
4327 goto err_out;
4330 while (1) {
4331 int bytes = mtd->writesize;
4332 uint8_t *wbuf = buf;
4333 int use_bufpoi;
4334 int part_pagewr = (column || writelen < mtd->writesize);
4336 if (part_pagewr)
4337 use_bufpoi = 1;
4338 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
4339 use_bufpoi = !virt_addr_valid(buf) ||
4340 !IS_ALIGNED((unsigned long)buf,
4341 chip->buf_align);
4342 else
4343 use_bufpoi = 0;
4345 /* Partial page write, or need to use the bounce buffer? */
4346 if (use_bufpoi) {
4347 pr_debug("%s: using write bounce buffer for buf@%p\n",
4348 __func__, buf);
4349 if (part_pagewr)
4350 bytes = min_t(int, bytes - column, writelen);
4351 chip->pagebuf = -1;
4352 memset(chip->data_buf, 0xff, mtd->writesize);
4353 memcpy(&chip->data_buf[column], buf, bytes);
4354 wbuf = chip->data_buf;
4357 if (unlikely(oob)) {
4358 size_t len = min(oobwritelen, oobmaxlen);
4359 oob = nand_fill_oob(mtd, oob, len, ops);
4360 oobwritelen -= len;
4361 } else {
4362 /* We still need to erase leftover OOB data */
4363 memset(chip->oob_poi, 0xff, mtd->oobsize);
4366 ret = nand_write_page(mtd, chip, column, bytes, wbuf,
4367 oob_required, page,
4368 (ops->mode == MTD_OPS_RAW));
4369 if (ret)
4370 break;
4372 writelen -= bytes;
4373 if (!writelen)
4374 break;
4376 column = 0;
4377 buf += bytes;
4378 realpage++;
4380 page = realpage & chip->pagemask;
4381 /* Check, if we cross a chip boundary */
4382 if (!page) {
4383 chipnr++;
4384 chip->select_chip(mtd, -1);
4385 chip->select_chip(mtd, chipnr);
4389 ops->retlen = ops->len - writelen;
4390 if (unlikely(oob))
4391 ops->oobretlen = ops->ooblen;
4393 err_out:
4394 chip->select_chip(mtd, -1);
4395 return ret;
4399 * panic_nand_write - [MTD Interface] NAND write with ECC
4400 * @mtd: MTD device structure
4401 * @to: offset to write to
4402 * @len: number of bytes to write
4403 * @retlen: pointer to variable to store the number of written bytes
4404 * @buf: the data to write
4406 * NAND write with ECC. Used when performing writes in interrupt context, this
4407 * may for example be called by mtdoops when writing an oops while in panic.
4409 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4410 size_t *retlen, const uint8_t *buf)
4412 struct nand_chip *chip = mtd_to_nand(mtd);
4413 int chipnr = (int)(to >> chip->chip_shift);
4414 struct mtd_oob_ops ops;
4415 int ret;
4417 /* Grab the device */
4418 panic_nand_get_device(chip, mtd, FL_WRITING);
4420 chip->select_chip(mtd, chipnr);
4422 /* Wait for the device to get ready */
4423 panic_nand_wait(mtd, chip, 400);
4425 memset(&ops, 0, sizeof(ops));
4426 ops.len = len;
4427 ops.datbuf = (uint8_t *)buf;
4428 ops.mode = MTD_OPS_PLACE_OOB;
4430 ret = nand_do_write_ops(mtd, to, &ops);
4432 *retlen = ops.retlen;
4433 return ret;
4437 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
4438 * @mtd: MTD device structure
4439 * @to: offset to write to
4440 * @ops: oob operation description structure
4442 * NAND write out-of-band.
4444 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
4445 struct mtd_oob_ops *ops)
4447 int chipnr, page, status, len;
4448 struct nand_chip *chip = mtd_to_nand(mtd);
4450 pr_debug("%s: to = 0x%08x, len = %i\n",
4451 __func__, (unsigned int)to, (int)ops->ooblen);
4453 len = mtd_oobavail(mtd, ops);
4455 /* Do not allow write past end of page */
4456 if ((ops->ooboffs + ops->ooblen) > len) {
4457 pr_debug("%s: attempt to write past end of page\n",
4458 __func__);
4459 return -EINVAL;
4462 chipnr = (int)(to >> chip->chip_shift);
4465 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
4466 * of my DiskOnChip 2000 test units) will clear the whole data page too
4467 * if we don't do this. I have no clue why, but I seem to have 'fixed'
4468 * it in the doc2000 driver in August 1999. dwmw2.
4470 nand_reset(chip, chipnr);
4472 chip->select_chip(mtd, chipnr);
4474 /* Shift to get page */
4475 page = (int)(to >> chip->page_shift);
4477 /* Check, if it is write protected */
4478 if (nand_check_wp(mtd)) {
4479 chip->select_chip(mtd, -1);
4480 return -EROFS;
4483 /* Invalidate the page cache, if we write to the cached page */
4484 if (page == chip->pagebuf)
4485 chip->pagebuf = -1;
4487 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
4489 if (ops->mode == MTD_OPS_RAW)
4490 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
4491 else
4492 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
4494 chip->select_chip(mtd, -1);
4496 if (status)
4497 return status;
4499 ops->oobretlen = ops->ooblen;
4501 return 0;
4505 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4506 * @mtd: MTD device structure
4507 * @to: offset to write to
4508 * @ops: oob operation description structure
4510 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4511 struct mtd_oob_ops *ops)
4513 int ret = -ENOTSUPP;
4515 ops->retlen = 0;
4517 nand_get_device(mtd, FL_WRITING);
4519 switch (ops->mode) {
4520 case MTD_OPS_PLACE_OOB:
4521 case MTD_OPS_AUTO_OOB:
4522 case MTD_OPS_RAW:
4523 break;
4525 default:
4526 goto out;
4529 if (!ops->datbuf)
4530 ret = nand_do_write_oob(mtd, to, ops);
4531 else
4532 ret = nand_do_write_ops(mtd, to, ops);
4534 out:
4535 nand_release_device(mtd);
4536 return ret;
4540 * single_erase - [GENERIC] NAND standard block erase command function
4541 * @mtd: MTD device structure
4542 * @page: the page address of the block which will be erased
4544 * Standard erase command for NAND chips. Returns NAND status.
4546 static int single_erase(struct mtd_info *mtd, int page)
4548 struct nand_chip *chip = mtd_to_nand(mtd);
4549 unsigned int eraseblock;
4551 /* Send commands to erase a block */
4552 eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
4554 return nand_erase_op(chip, eraseblock);
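/*
 * Example: with a 2 KiB page (page_shift = 11) and a 128 KiB block
 * (phys_erase_shift = 17) there are 64 pages per block, so page 130
 * maps to eraseblock 130 >> 6 = 2.
 */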
4558 * nand_erase - [MTD Interface] erase block(s)
4559 * @mtd: MTD device structure
4560 * @instr: erase instruction
4562 * Erase one or more blocks.
4564 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4566 return nand_erase_nand(mtd, instr, 0);
4570 * nand_erase_nand - [INTERN] erase block(s)
4571 * @mtd: MTD device structure
4572 * @instr: erase instruction
4573 * @allowbbt: allow erasing the bbt area
4575 * Erase one or more blocks.
4577 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
4578 int allowbbt)
4580 int page, status, pages_per_block, ret, chipnr;
4581 struct nand_chip *chip = mtd_to_nand(mtd);
4582 loff_t len;
4584 pr_debug("%s: start = 0x%012llx, len = %llu\n",
4585 __func__, (unsigned long long)instr->addr,
4586 (unsigned long long)instr->len);
4588 if (check_offs_len(mtd, instr->addr, instr->len))
4589 return -EINVAL;
4591 /* Grab the lock and see if the device is available */
4592 nand_get_device(mtd, FL_ERASING);
4594 /* Shift to get first page */
4595 page = (int)(instr->addr >> chip->page_shift);
4596 chipnr = (int)(instr->addr >> chip->chip_shift);
4598 /* Calculate pages in each block */
4599 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4601 /* Select the NAND device */
4602 chip->select_chip(mtd, chipnr);
4604 /* Check, if it is write protected */
4605 if (nand_check_wp(mtd)) {
4606 pr_debug("%s: device is write protected!\n",
4607 __func__);
4608 instr->state = MTD_ERASE_FAILED;
4609 goto erase_exit;
4612 /* Loop through the pages */
4613 len = instr->len;
4615 instr->state = MTD_ERASING;
4617 while (len) {
4618 /* Check if we have a bad block; we do not erase bad blocks! */
4619 if (nand_block_checkbad(mtd, ((loff_t) page) <<
4620 chip->page_shift, allowbbt)) {
4621 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4622 __func__, page);
4623 instr->state = MTD_ERASE_FAILED;
4624 goto erase_exit;
4628 * Invalidate the page cache, if we erase the block which
4629 * contains the current cached page.
4631 if (page <= chip->pagebuf && chip->pagebuf <
4632 (page + pages_per_block))
4633 chip->pagebuf = -1;
4635 status = chip->erase(mtd, page & chip->pagemask);
4637 /* See if block erase succeeded */
4638 if (status) {
4639 pr_debug("%s: failed erase, page 0x%08x\n",
4640 __func__, page);
4641 instr->state = MTD_ERASE_FAILED;
4642 instr->fail_addr =
4643 ((loff_t)page << chip->page_shift);
4644 goto erase_exit;
4647 /* Increment page address and decrement length */
4648 len -= (1ULL << chip->phys_erase_shift);
4649 page += pages_per_block;
4651 /* Check, if we cross a chip boundary */
4652 if (len && !(page & chip->pagemask)) {
4653 chipnr++;
4654 chip->select_chip(mtd, -1);
4655 chip->select_chip(mtd, chipnr);
4658 instr->state = MTD_ERASE_DONE;
4660 erase_exit:
4662 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
4664 /* Deselect and wake up anyone waiting on the device */
4665 chip->select_chip(mtd, -1);
4666 nand_release_device(mtd);
4668 /* Do call back function */
4669 if (!ret)
4670 mtd_erase_callback(instr);
4672 /* Return more or less happy */
4673 return ret;
4677 * nand_sync - [MTD Interface] sync
4678 * @mtd: MTD device structure
4680 * Sync is actually a wait for chip ready function.
4682 static void nand_sync(struct mtd_info *mtd)
4684 pr_debug("%s: called\n", __func__);
4686 /* Grab the lock and see if the device is available */
4687 nand_get_device(mtd, FL_SYNCING);
4688 /* Release it and go back */
4689 nand_release_device(mtd);
4693 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4694 * @mtd: MTD device structure
4695 * @offs: offset relative to mtd start
4697 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4699 struct nand_chip *chip = mtd_to_nand(mtd);
4700 int chipnr = (int)(offs >> chip->chip_shift);
4701 int ret;
4703 /* Select the NAND device */
4704 nand_get_device(mtd, FL_READING);
4705 chip->select_chip(mtd, chipnr);
4707 ret = nand_block_checkbad(mtd, offs, 0);
4709 chip->select_chip(mtd, -1);
4710 nand_release_device(mtd);
4712 return ret;
4716 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4717 * @mtd: MTD device structure
4718 * @ofs: offset relative to mtd start
4720 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4722 int ret;
4724 ret = nand_block_isbad(mtd, ofs);
4725 if (ret) {
4726 /* If it was bad already, return success and do nothing */
4727 if (ret > 0)
4728 return 0;
4729 return ret;
4732 return nand_block_markbad_lowlevel(mtd, ofs);
4736 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
4737 * @mtd: MTD device structure
4738 * @ofs: offset relative to mtd start
4739 * @len: length of mtd
4741 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
4743 struct nand_chip *chip = mtd_to_nand(mtd);
4744 u32 part_start_block;
4745 u32 part_end_block;
4746 u32 part_start_die;
4747 u32 part_end_die;
4750 * max_bb_per_die and blocks_per_die are used to determine
4751 * the maximum bad block count.
4753 if (!chip->max_bb_per_die || !chip->blocks_per_die)
4754 return -ENOTSUPP;
4756 /* Get the start and end of the partition in erase blocks. */
4757 part_start_block = mtd_div_by_eb(ofs, mtd);
4758 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
4760 /* Get the start and end LUNs of the partition. */
4761 part_start_die = part_start_block / chip->blocks_per_die;
4762 part_end_die = part_end_block / chip->blocks_per_die;
4765 * Look up the bad blocks per unit and multiply by the number of units
4766 * that the partition spans.
4768 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
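/*
 * Example (hypothetical numbers): with blocks_per_die = 1024 and
 * max_bb_per_die = 20, a partition covering eraseblocks 1000..1100 spans
 * dies 0 and 1, so the reported worst case is 20 * 2 = 40 bad blocks.
 */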
4772 * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
4773 * @mtd: MTD device structure
4774 * @chip: nand chip info structure
4775 * @addr: feature address.
4776 * @subfeature_param: the subfeature parameters, a four bytes array.
4778 static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
4779 int addr, uint8_t *subfeature_param)
4781 if (!chip->onfi_version ||
4782 !(le16_to_cpu(chip->onfi_params.opt_cmd)
4783 & ONFI_OPT_CMD_SET_GET_FEATURES))
4784 return -EINVAL;
4786 return nand_set_features_op(chip, addr, subfeature_param);
4790 * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand
4791 * @mtd: MTD device structure
4792 * @chip: nand chip info structure
4793 * @addr: feature address.
4794 * @subfeature_param: the subfeature parameters, a four bytes array.
4796 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
4797 int addr, uint8_t *subfeature_param)
4799 if (!chip->onfi_version ||
4800 !(le16_to_cpu(chip->onfi_params.opt_cmd)
4801 & ONFI_OPT_CMD_SET_GET_FEATURES))
4802 return -EINVAL;
4804 return nand_get_features_op(chip, addr, subfeature_param);
4808 * nand_onfi_get_set_features_notsupp - set/get features stub returning
4809 * -ENOTSUPP
4810 * @mtd: MTD device structure
4811 * @chip: nand chip info structure
4812 * @addr: feature address.
4813 * @subfeature_param: the subfeature parameters, a four bytes array.
4815 * Should be used by NAND controller drivers that do not support the SET/GET
4816 * FEATURES operations.
4818 int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
4819 struct nand_chip *chip, int addr,
4820 u8 *subfeature_param)
4822 return -ENOTSUPP;
4824 EXPORT_SYMBOL(nand_onfi_get_set_features_notsupp);
4827 * nand_suspend - [MTD Interface] Suspend the NAND flash
4828 * @mtd: MTD device structure
4830 static int nand_suspend(struct mtd_info *mtd)
4832 return nand_get_device(mtd, FL_PM_SUSPENDED);
4836 * nand_resume - [MTD Interface] Resume the NAND flash
4837 * @mtd: MTD device structure
4839 static void nand_resume(struct mtd_info *mtd)
4841 struct nand_chip *chip = mtd_to_nand(mtd);
4843 if (chip->state == FL_PM_SUSPENDED)
4844 nand_release_device(mtd);
4845 else
4846 pr_err("%s called for a chip which is not in suspended state\n",
4847 __func__);
4851 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4852 * prevent further operations
4853 * @mtd: MTD device structure
4855 static void nand_shutdown(struct mtd_info *mtd)
4857 nand_get_device(mtd, FL_PM_SUSPENDED);
4860 /* Set default functions */
4861 static void nand_set_defaults(struct nand_chip *chip)
4863 unsigned int busw = chip->options & NAND_BUSWIDTH_16;
4865 /* check for proper chip_delay setup, set 20us if not */
4866 if (!chip->chip_delay)
4867 chip->chip_delay = 20;
4869 /* check if a user supplied command function was given */
4870 if (!chip->cmdfunc && !chip->exec_op)
4871 chip->cmdfunc = nand_command;
4873 /* check if a user supplied wait function was given */
4874 if (chip->waitfunc == NULL)
4875 chip->waitfunc = nand_wait;
4877 if (!chip->select_chip)
4878 chip->select_chip = nand_select_chip;
4880 /* set for ONFI nand */
4881 if (!chip->onfi_set_features)
4882 chip->onfi_set_features = nand_onfi_set_features;
4883 if (!chip->onfi_get_features)
4884 chip->onfi_get_features = nand_onfi_get_features;
4886 /* If called twice, pointers that depend on busw may need to be reset */
4887 if (!chip->read_byte || chip->read_byte == nand_read_byte)
4888 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
4889 if (!chip->read_word)
4890 chip->read_word = nand_read_word;
4891 if (!chip->block_bad)
4892 chip->block_bad = nand_block_bad;
4893 if (!chip->block_markbad)
4894 chip->block_markbad = nand_default_block_markbad;
4895 if (!chip->write_buf || chip->write_buf == nand_write_buf)
4896 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
4897 if (!chip->write_byte || chip->write_byte == nand_write_byte)
4898 chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
4899 if (!chip->read_buf || chip->read_buf == nand_read_buf)
4900 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
4901 if (!chip->scan_bbt)
4902 chip->scan_bbt = nand_default_bbt;
4904 if (!chip->controller) {
4905 chip->controller = &chip->hwcontrol;
4906 nand_hw_control_init(chip->controller);
4909 if (!chip->buf_align)
4910 chip->buf_align = 1;
4913 /* Sanitize ONFI strings so we can safely print them */
4914 static void sanitize_string(uint8_t *s, size_t len)
4916 ssize_t i;
4918 /* Null terminate */
4919 s[len - 1] = 0;
4921 /* Remove non printable chars */
4922 for (i = 0; i < len - 1; i++) {
4923 if (s[i] < ' ' || s[i] > 127)
4924 s[i] = '?';
4927 /* Remove trailing spaces */
4928 strim(s);
4931 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
4933 int i;
4934 while (len--) {
4935 crc ^= *p++ << 8;
4936 for (i = 0; i < 8; i++)
4937 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
4940 return crc;
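/*
 * This is the ONFI CRC-16: polynomial 0x8005 (x^16 + x^15 + x^2 + 1),
 * processed MSB first and seeded with ONFI_CRC_BASE. The parameter page
 * checks below feed the first 254 bytes of each parameter page copy and
 * compare the result against the little-endian CRC stored in p->crc.
 */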
4943 /* Parse the Extended Parameter Page. */
4944 static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
4945 struct nand_onfi_params *p)
4947 struct onfi_ext_param_page *ep;
4948 struct onfi_ext_section *s;
4949 struct onfi_ext_ecc_info *ecc;
4950 uint8_t *cursor;
4951 int ret;
4952 int len;
4953 int i;
4955 len = le16_to_cpu(p->ext_param_page_length) * 16;
4956 ep = kmalloc(len, GFP_KERNEL);
4957 if (!ep)
4958 return -ENOMEM;
4960 /* Send our own NAND_CMD_PARAM. */
4961 ret = nand_read_param_page_op(chip, 0, NULL, 0);
4962 if (ret)
4963 goto ext_out;
4965 /* Use the Change Read Column command to skip the ONFI param pages. */
4966 ret = nand_change_read_column_op(chip,
4967 sizeof(*p) * p->num_of_param_pages,
4968 ep, len, true);
4969 if (ret)
4970 goto ext_out;
4972 ret = -EINVAL;
4973 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
4974 != le16_to_cpu(ep->crc))) {
4975 pr_debug("fail in the CRC.\n");
4976 goto ext_out;
4980 * Check the signature.
4981 * We do not strictly follow the ONFI spec here; this may change in the future.
4983 if (strncmp(ep->sig, "EPPS", 4)) {
4984 pr_debug("The signature is invalid.\n");
4985 goto ext_out;
4988 /* find the ECC section. */
4989 cursor = (uint8_t *)(ep + 1);
4990 for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
4991 s = ep->sections + i;
4992 if (s->type == ONFI_SECTION_TYPE_2)
4993 break;
4994 cursor += s->length * 16;
4996 if (i == ONFI_EXT_SECTION_MAX) {
4997 pr_debug("We can not find the ECC section.\n");
4998 goto ext_out;
5001 /* get the info we want. */
5002 ecc = (struct onfi_ext_ecc_info *)cursor;
5004 if (!ecc->codeword_size) {
5005 pr_debug("Invalid codeword size\n");
5006 goto ext_out;
5009 chip->ecc_strength_ds = ecc->ecc_bits;
5010 chip->ecc_step_ds = 1 << ecc->codeword_size;
5011 ret = 0;
5013 ext_out:
5014 kfree(ep);
5015 return ret;
5019 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
5021 static int nand_flash_detect_onfi(struct nand_chip *chip)
5023 struct mtd_info *mtd = nand_to_mtd(chip);
5024 struct nand_onfi_params *p = &chip->onfi_params;
5025 char id[4];
5026 int i, ret, val;
5028 /* Try ONFI for unknown chip or LP */
5029 ret = nand_readid_op(chip, 0x20, id, sizeof(id));
5030 if (ret || strncmp(id, "ONFI", 4))
5031 return 0;
5033 ret = nand_read_param_page_op(chip, 0, NULL, 0);
5034 if (ret)
5035 return 0;
5037 for (i = 0; i < 3; i++) {
5038 ret = nand_read_data_op(chip, p, sizeof(*p), true);
5039 if (ret)
5040 return 0;
5042 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
5043 le16_to_cpu(p->crc)) {
5044 break;
5048 if (i == 3) {
5049 pr_err("Could not find valid ONFI parameter page; aborting\n");
5050 return 0;
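/*
 * p->revision is a bitfield: bit 1 flags ONFI 1.0 support, bit 2 ONFI 2.0,
 * bit 3 ONFI 2.1, bit 4 ONFI 2.2 and bit 5 ONFI 2.3. For example, a chip
 * reporting 0x1e (bits 1-4 set) is handled as ONFI 2.2, the highest
 * revision it advertises.
 */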
5053 /* Check version */
5054 val = le16_to_cpu(p->revision);
5055 if (val & (1 << 5))
5056 chip->onfi_version = 23;
5057 else if (val & (1 << 4))
5058 chip->onfi_version = 22;
5059 else if (val & (1 << 3))
5060 chip->onfi_version = 21;
5061 else if (val & (1 << 2))
5062 chip->onfi_version = 20;
5063 else if (val & (1 << 1))
5064 chip->onfi_version = 10;
5066 if (!chip->onfi_version) {
5067 pr_info("unsupported ONFI version: %d\n", val);
5068 return 0;
5071 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
5072 sanitize_string(p->model, sizeof(p->model));
5073 if (!mtd->name)
5074 mtd->name = p->model;
5076 mtd->writesize = le32_to_cpu(p->byte_per_page);
5079 * pages_per_block and blocks_per_lun may not be a power-of-2 size
5080 * (don't ask me who thought of this...). MTD assumes that these
5081 * dimensions will be power-of-2, so just truncate the remaining area.
5083 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
5084 mtd->erasesize *= mtd->writesize;
5086 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
5088 /* See erasesize comment */
5089 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
5090 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
5091 chip->bits_per_cell = p->bits_per_cell;
5093 chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
5094 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
5096 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
5097 chip->options |= NAND_BUSWIDTH_16;
5099 if (p->ecc_bits != 0xff) {
5100 chip->ecc_strength_ds = p->ecc_bits;
5101 chip->ecc_step_ds = 512;
5102 } else if (chip->onfi_version >= 21 &&
5103 (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
5106 * nand_flash_detect_ext_param_page() uses the
5107 * Change Read Column command, which may not be supported
5108 * by chip->cmdfunc, so try to update chip->cmdfunc
5109 * now. We do not replace a user supplied command function.
5111 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
5112 chip->cmdfunc = nand_command_lp;
5114 /* The Extended Parameter Page is supported since ONFI 2.1. */
5115 if (nand_flash_detect_ext_param_page(chip, p))
5116 pr_warn("Failed to detect ONFI extended param page\n");
5117 } else {
5118 pr_warn("Could not retrieve ONFI ECC requirements\n");
5121 return 1;
5125 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
5127 static int nand_flash_detect_jedec(struct nand_chip *chip)
5129 struct mtd_info *mtd = nand_to_mtd(chip);
5130 struct nand_jedec_params *p = &chip->jedec_params;
5131 struct jedec_ecc_info *ecc;
5132 char id[5];
5133 int i, val, ret;
5135 /* Try JEDEC for unknown chip or LP */
5136 ret = nand_readid_op(chip, 0x40, id, sizeof(id));
5137 if (ret || strncmp(id, "JEDEC", sizeof(id)))
5138 return 0;
5140 ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
5141 if (ret)
5142 return 0;
5144 for (i = 0; i < 3; i++) {
5145 ret = nand_read_data_op(chip, p, sizeof(*p), true);
5146 if (ret)
5147 return 0;
5149 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
5150 le16_to_cpu(p->crc))
5151 break;
5154 if (i == 3) {
5155 pr_err("Could not find valid JEDEC parameter page; aborting\n");
5156 return 0;
5159 /* Check version */
5160 val = le16_to_cpu(p->revision);
5161 if (val & (1 << 2))
5162 chip->jedec_version = 10;
5163 else if (val & (1 << 1))
5164 chip->jedec_version = 1; /* vendor specific version */
5166 if (!chip->jedec_version) {
5167 pr_info("unsupported JEDEC version: %d\n", val);
5168 return 0;
5171 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
5172 sanitize_string(p->model, sizeof(p->model));
5173 if (!mtd->name)
5174 mtd->name = p->model;
5176 mtd->writesize = le32_to_cpu(p->byte_per_page);
5178 /* See the comment in nand_flash_detect_onfi(). */
5179 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
5180 mtd->erasesize *= mtd->writesize;
5182 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
5184 /* See the comment in nand_flash_detect_onfi(). */
5185 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
5186 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
5187 chip->bits_per_cell = p->bits_per_cell;
5189 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
5190 chip->options |= NAND_BUSWIDTH_16;
5192 /* ECC info */
5193 ecc = &p->ecc_info[0];
5195 if (ecc->codeword_size >= 9) {
5196 chip->ecc_strength_ds = ecc->ecc_bits;
5197 chip->ecc_step_ds = 1 << ecc->codeword_size;
5198 } else {
5199 pr_warn("Invalid codeword size\n");
5202 return 1;
5206 * nand_id_has_period - Check if an ID string has a given wraparound period
5207 * @id_data: the ID string
5208 * @arrlen: the length of the @id_data array
5209 * @period: the period of repetition
5211 * Check if an ID string is repeated within a given sequence of bytes at
5212 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
5213 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
5214 * if the repetition has a period of @period; otherwise, returns zero.
5216 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
5218 int i, j;
5219 for (i = 0; i < period; i++)
5220 for (j = i + period; j < arrlen; j += period)
5221 if (id_data[i] != id_data[j])
5222 return 0;
5223 return 1;
5227 * nand_id_len - Get the length of an ID string returned by CMD_READID
5228 * @id_data: the ID string
5229 * @arrlen: the length of the @id_data array
5231 * Returns the length of the ID string, according to known wraparound/trailing
5232 * zero patterns. If no pattern exists, returns the length of the array.
5234 static int nand_id_len(u8 *id_data, int arrlen)
5236 int last_nonzero, period;
5238 /* Find last non-zero byte */
5239 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
5240 if (id_data[last_nonzero])
5241 break;
5243 /* All zeros */
5244 if (last_nonzero < 0)
5245 return 0;
5247 /* Calculate wraparound period */
5248 for (period = 1; period < arrlen; period++)
5249 if (nand_id_has_period(id_data, arrlen, period))
5250 break;
5252 /* There's a repeated pattern */
5253 if (period < arrlen)
5254 return period;
5256 /* There are trailing zeros */
5257 if (last_nonzero < arrlen - 1)
5258 return last_nonzero + 1;
5260 /* No pattern detected */
5261 return arrlen;
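/*
 * Examples with hypothetical ID strings: {0x98, 0xd1, 0x90, 0x15, 0x76,
 * 0x98, 0xd1, 0x90} wraps around with a period of 5, so nand_id_len()
 * returns 5; {0x2c, 0xda, 0x90, 0x95, 0x06, 0x00, 0x00, 0x00} has three
 * trailing zeros and also yields a length of 5.
 */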
5264 /* Extract the bits per cell from the 3rd byte of the extended ID */
5265 static int nand_get_bits_per_cell(u8 cellinfo)
5267 int bits;
5269 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
5270 bits >>= NAND_CI_CELLTYPE_SHIFT;
5271 return bits + 1;
5275 * Many newer NAND chips share similar device ID codes, which represent the size of the
5276 * chip. The rest of the parameters must be decoded according to generic or
5277 * manufacturer-specific "extended ID" decoding patterns.
5279 void nand_decode_ext_id(struct nand_chip *chip)
5281 struct mtd_info *mtd = nand_to_mtd(chip);
5282 int extid;
5283 u8 *id_data = chip->id.data;
5284 /* The 3rd id byte holds MLC / multichip data */
5285 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
5286 /* The 4th id byte is the important one */
5287 extid = id_data[3];
5289 /* Calc pagesize */
5290 mtd->writesize = 1024 << (extid & 0x03);
5291 extid >>= 2;
5292 /* Calc oobsize */
5293 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
5294 extid >>= 2;
5295 /* Calc blocksize. Blocksize is multiples of 64KiB */
5296 mtd->erasesize = (64 * 1024) << (extid & 0x03);
5297 extid >>= 2;
5298 /* Get buswidth information */
5299 if (extid & 0x1)
5300 chip->options |= NAND_BUSWIDTH_16;
5302 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
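/*
 * Worked example: a 4th ID byte of 0x95 decodes to a 2048 byte page
 * (1024 << 1), 64 bytes of OOB (16 bytes per 512 bytes of page), a
 * 128 KiB eraseblock (64 KiB << 1) and an 8 bit bus.
 */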
5305 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
5306 * decodes a matching ID table entry and assigns the MTD size parameters for
5307 * the chip.
5309 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
5311 struct mtd_info *mtd = nand_to_mtd(chip);
5313 mtd->erasesize = type->erasesize;
5314 mtd->writesize = type->pagesize;
5315 mtd->oobsize = mtd->writesize / 32;
5317 /* All legacy ID NAND chips are small-page, SLC */
5318 chip->bits_per_cell = 1;
5322 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
5323 * heuristic patterns using various detected parameters (e.g., manufacturer,
5324 * page size, cell-type information).
5326 static void nand_decode_bbm_options(struct nand_chip *chip)
5328 struct mtd_info *mtd = nand_to_mtd(chip);
5330 /* Set the bad block position */
5331 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
5332 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
5333 else
5334 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
5337 static inline bool is_full_id_nand(struct nand_flash_dev *type)
5339 return type->id_len;
5342 static bool find_full_id_nand(struct nand_chip *chip,
5343 struct nand_flash_dev *type)
5345 struct mtd_info *mtd = nand_to_mtd(chip);
5346 u8 *id_data = chip->id.data;
5348 if (!strncmp(type->id, id_data, type->id_len)) {
5349 mtd->writesize = type->pagesize;
5350 mtd->erasesize = type->erasesize;
5351 mtd->oobsize = type->oobsize;
5353 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
5354 chip->chipsize = (uint64_t)type->chipsize << 20;
5355 chip->options |= type->options;
5356 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
5357 chip->ecc_step_ds = NAND_ECC_STEP(type);
5358 chip->onfi_timing_mode_default =
5359 type->onfi_timing_mode_default;
5361 if (!mtd->name)
5362 mtd->name = type->name;
5364 return true;
5366 return false;
5370 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
5371 * compliant and does not have a full-id or legacy-id entry in the nand_ids
5372 * table.
5374 static void nand_manufacturer_detect(struct nand_chip *chip)
5377 * Try manufacturer detection if available and use
5378 * nand_decode_ext_id() otherwise.
5380 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
5381 chip->manufacturer.desc->ops->detect) {
5382 /* The 3rd id byte holds MLC / multichip data */
5383 chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
5384 chip->manufacturer.desc->ops->detect(chip);
5385 } else {
5386 nand_decode_ext_id(chip);
5391 * Manufacturer initialization. This function is called for all NANDs including
5392 * ONFI and JEDEC compliant ones.
5393 * Manufacturer drivers should put all their specific initialization code in
5394 * their ->init() hook.
5396 static int nand_manufacturer_init(struct nand_chip *chip)
5398 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
5399 !chip->manufacturer.desc->ops->init)
5400 return 0;
5402 return chip->manufacturer.desc->ops->init(chip);
5406 * Manufacturer cleanup. This function is called for all NANDs including
5407 * ONFI and JEDEC compliant ones.
5408 * Manufacturer drivers should put all their specific cleanup code in their
5409 * ->cleanup() hook.
5411 static void nand_manufacturer_cleanup(struct nand_chip *chip)
5413 /* Release manufacturer private data */
5414 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
5415 chip->manufacturer.desc->ops->cleanup)
5416 chip->manufacturer.desc->ops->cleanup(chip);
5420 * Get the flash and manufacturer id and lookup if the type is supported.
5422 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
5424 const struct nand_manufacturer *manufacturer;
5425 struct mtd_info *mtd = nand_to_mtd(chip);
5426 int busw, ret;
5427 u8 *id_data = chip->id.data;
5428 u8 maf_id, dev_id;
5431 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
5432 * after power-up.
5434 ret = nand_reset(chip, 0);
5435 if (ret)
5436 return ret;
5438 /* Select the device */
5439 chip->select_chip(mtd, 0);
5441 /* Send the command for reading device ID */
5442 ret = nand_readid_op(chip, 0, id_data, 2);
5443 if (ret)
5444 return ret;
5446 /* Read manufacturer and device IDs */
5447 maf_id = id_data[0];
5448 dev_id = id_data[1];
5451 * Try again to make sure, as on some systems the bus-hold or other
5452 * interface concerns can cause random data which looks like a
5453 * possibly credible NAND flash to appear. If the two results do
5454 * not match, ignore the device completely.
5457 /* Read entire ID string */
5458 ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
5459 if (ret)
5460 return ret;
5462 if (id_data[0] != maf_id || id_data[1] != dev_id) {
5463 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
5464 maf_id, dev_id, id_data[0], id_data[1]);
5465 return -ENODEV;
5468 chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
5470 /* Try to identify manufacturer */
5471 manufacturer = nand_get_manufacturer(maf_id);
5472 chip->manufacturer.desc = manufacturer;
5474 if (!type)
5475 type = nand_flash_ids;
5478 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
5479 * override it.
5480 * This is required to make sure initial NAND bus width set by the
5481 * NAND controller driver is coherent with the real NAND bus width
5482 * (extracted by auto-detection code).
5484 busw = chip->options & NAND_BUSWIDTH_16;
5487 * The flag is only set (never cleared), reset it to its default value
5488 * before starting auto-detection.
5490 chip->options &= ~NAND_BUSWIDTH_16;
5492 for (; type->name != NULL; type++) {
5493 if (is_full_id_nand(type)) {
5494 if (find_full_id_nand(chip, type))
5495 goto ident_done;
5496 } else if (dev_id == type->dev_id) {
5497 break;
5501 chip->onfi_version = 0;
5502 if (!type->name || !type->pagesize) {
5503 /* Check if the chip is ONFI compliant */
5504 if (nand_flash_detect_onfi(chip))
5505 goto ident_done;
5507 /* Check if the chip is JEDEC compliant */
5508 if (nand_flash_detect_jedec(chip))
5509 goto ident_done;
5512 if (!type->name)
5513 return -ENODEV;
5515 if (!mtd->name)
5516 mtd->name = type->name;
5518 chip->chipsize = (uint64_t)type->chipsize << 20;
5520 if (!type->pagesize)
5521 nand_manufacturer_detect(chip);
5522 else
5523 nand_decode_id(chip, type);
5525 /* Get chip options */
5526 chip->options |= type->options;
5528 ident_done:
5530 if (chip->options & NAND_BUSWIDTH_AUTO) {
5531 WARN_ON(busw & NAND_BUSWIDTH_16);
5532 nand_set_defaults(chip);
5533 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
5535 * Check if the buswidth is correct. Hardware drivers should set
5536 * up the chip correctly!
5538 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5539 maf_id, dev_id);
5540 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
5541 mtd->name);
5542 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
5543 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
5544 return -EINVAL;
5547 nand_decode_bbm_options(chip);
5549 /* Calculate the address shift from the page size */
5550 chip->page_shift = ffs(mtd->writesize) - 1;
5551 /* Convert chipsize to number of pages per chip -1 */
5552 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
5554 chip->bbt_erase_shift = chip->phys_erase_shift =
5555 ffs(mtd->erasesize) - 1;
5556 if (chip->chipsize & 0xffffffff)
5557 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
5558 else {
5559 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
5560 chip->chip_shift += 32 - 1;
5563 if (chip->chip_shift - chip->page_shift > 16)
5564 chip->options |= NAND_ROW_ADDR_3;
5566 chip->badblockbits = 8;
5567 chip->erase = single_erase;
5569 /* Do not replace user supplied command function! */
5570 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
5571 chip->cmdfunc = nand_command_lp;
5573 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5574 maf_id, dev_id);
5576 if (chip->onfi_version)
5577 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
5578 chip->onfi_params.model);
5579 else if (chip->jedec_version)
5580 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
5581 chip->jedec_params.model);
5582 else
5583 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
5584 type->name);
5586 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
5587 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
5588 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
5589 return 0;
5592 static const char * const nand_ecc_modes[] = {
5593 [NAND_ECC_NONE] = "none",
5594 [NAND_ECC_SOFT] = "soft",
5595 [NAND_ECC_HW] = "hw",
5596 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
5597 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
5598 [NAND_ECC_ON_DIE] = "on-die",
5601 static int of_get_nand_ecc_mode(struct device_node *np)
5603 const char *pm;
5604 int err, i;
5606 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5607 if (err < 0)
5608 return err;
5610 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
5611 if (!strcasecmp(pm, nand_ecc_modes[i]))
5612 return i;
5615 * For backward compatibility we support a few obsolete values that no
5616 * longer have mappings in nand_ecc_modes_t (they were merged
5617 * with other enums).
5619 if (!strcasecmp(pm, "soft_bch"))
5620 return NAND_ECC_SOFT;
5622 return -ENODEV;
5625 static const char * const nand_ecc_algos[] = {
5626 [NAND_ECC_HAMMING] = "hamming",
5627 [NAND_ECC_BCH] = "bch",
5630 static int of_get_nand_ecc_algo(struct device_node *np)
5632 const char *pm;
5633 int err, i;
5635 err = of_property_read_string(np, "nand-ecc-algo", &pm);
5636 if (!err) {
5637 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
5638 if (!strcasecmp(pm, nand_ecc_algos[i]))
5639 return i;
5640 return -ENODEV;
5644 * For backward compatibility we also read "nand-ecc-mode", checking
5645 * for some obsolete values that specified the ECC algorithm.
5647 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5648 if (err < 0)
5649 return err;
5651 if (!strcasecmp(pm, "soft"))
5652 return NAND_ECC_HAMMING;
5653 else if (!strcasecmp(pm, "soft_bch"))
5654 return NAND_ECC_BCH;
5656 return -ENODEV;
5659 static int of_get_nand_ecc_step_size(struct device_node *np)
5661 int ret;
5662 u32 val;
5664 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
5665 return ret ? ret : val;
5668 static int of_get_nand_ecc_strength(struct device_node *np)
5670 int ret;
5671 u32 val;
5673 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
5674 return ret ? ret : val;
5677 static int of_get_nand_bus_width(struct device_node *np)
5679 u32 val;
5681 if (of_property_read_u32(np, "nand-bus-width", &val))
5682 return 8;
5684 switch (val) {
5685 case 8:
5686 case 16:
5687 return val;
5688 default:
5689 return -EIO;
5693 static bool of_get_nand_on_flash_bbt(struct device_node *np)
5695 return of_property_read_bool(np, "nand-on-flash-bbt");
5698 static int nand_dt_init(struct nand_chip *chip)
5700 struct device_node *dn = nand_get_flash_node(chip);
5701 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
5703 if (!dn)
5704 return 0;
5706 if (of_get_nand_bus_width(dn) == 16)
5707 chip->options |= NAND_BUSWIDTH_16;
5709 if (of_get_nand_on_flash_bbt(dn))
5710 chip->bbt_options |= NAND_BBT_USE_FLASH;
5712 ecc_mode = of_get_nand_ecc_mode(dn);
5713 ecc_algo = of_get_nand_ecc_algo(dn);
5714 ecc_strength = of_get_nand_ecc_strength(dn);
5715 ecc_step = of_get_nand_ecc_step_size(dn);
5717 if (ecc_mode >= 0)
5718 chip->ecc.mode = ecc_mode;
5720 if (ecc_algo >= 0)
5721 chip->ecc.algo = ecc_algo;
5723 if (ecc_strength >= 0)
5724 chip->ecc.strength = ecc_strength;
5726 if (ecc_step > 0)
5727 chip->ecc.size = ecc_step;
5729 if (of_property_read_bool(dn, "nand-ecc-maximize"))
5730 chip->ecc.options |= NAND_ECC_MAXIMIZE;
5732 return 0;
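/*
 * A NAND flash node consumed by nand_dt_init() typically carries
 * properties like the following (values are only an example), plus
 * optionally "nand-ecc-maximize":
 *
 *	nand-bus-width = <16>;
 *	nand-on-flash-bbt;
 *	nand-ecc-mode = "hw";
 *	nand-ecc-algo = "bch";
 *	nand-ecc-strength = <8>;
 *	nand-ecc-step-size = <512>;
 */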
5736 * nand_scan_ident - [NAND Interface] Scan for the NAND device
5737 * @mtd: MTD device structure
5738 * @maxchips: number of chips to scan for
5739 * @table: alternative NAND ID table
5741 * This is the first phase of the normal nand_scan() function. It reads the
5742 * flash ID and sets up MTD fields accordingly.
5745 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
5746 struct nand_flash_dev *table)
5748 int i, nand_maf_id, nand_dev_id;
5749 struct nand_chip *chip = mtd_to_nand(mtd);
5750 int ret;
5752 /* Enforce the right timings for reset/detection */
5753 onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
5755 ret = nand_dt_init(chip);
5756 if (ret)
5757 return ret;
5759 if (!mtd->name && mtd->dev.parent)
5760 mtd->name = dev_name(mtd->dev.parent);
5763 * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
5764 * populated.
5766 if (!chip->exec_op) {
5768 * Default functions assigned for ->cmdfunc() and
5769 * ->select_chip() both expect ->cmd_ctrl() to be populated.
5771 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
5772 pr_err("->cmd_ctrl() should be provided\n");
5773 return -EINVAL;
5777 /* Set the default functions */
5778 nand_set_defaults(chip);
5780 /* Read the flash type */
5781 ret = nand_detect(chip, table);
5782 if (ret) {
5783 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5784 pr_warn("No NAND device found\n");
5785 chip->select_chip(mtd, -1);
5786 return ret;
5789 nand_maf_id = chip->id.data[0];
5790 nand_dev_id = chip->id.data[1];
5792 chip->select_chip(mtd, -1);
5794 /* Check for a chip array */
5795 for (i = 1; i < maxchips; i++) {
5796 u8 id[2];
5798 /* See comment in nand_get_flash_type for reset */
5799 nand_reset(chip, i);
5801 chip->select_chip(mtd, i);
5802 /* Send the command for reading device ID */
5803 nand_readid_op(chip, 0, id, sizeof(id));
5804 /* Read manufacturer and device IDs */
5805 if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5806 chip->select_chip(mtd, -1);
5807 break;
5809 chip->select_chip(mtd, -1);
5811 if (i > 1)
5812 pr_info("%d chips detected\n", i);
5814 /* Store the number of chips and calc total size for mtd */
5815 chip->numchips = i;
5816 mtd->size = i * chip->chipsize;
5818 return 0;
5820 EXPORT_SYMBOL(nand_scan_ident);
5822 static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
5824 struct nand_chip *chip = mtd_to_nand(mtd);
5825 struct nand_ecc_ctrl *ecc = &chip->ecc;
5827 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
5828 return -EINVAL;
5830 switch (ecc->algo) {
5831 case NAND_ECC_HAMMING:
5832 ecc->calculate = nand_calculate_ecc;
5833 ecc->correct = nand_correct_data;
5834 ecc->read_page = nand_read_page_swecc;
5835 ecc->read_subpage = nand_read_subpage;
5836 ecc->write_page = nand_write_page_swecc;
5837 ecc->read_page_raw = nand_read_page_raw;
5838 ecc->write_page_raw = nand_write_page_raw;
5839 ecc->read_oob = nand_read_oob_std;
5840 ecc->write_oob = nand_write_oob_std;
5841 if (!ecc->size)
5842 ecc->size = 256;
5843 ecc->bytes = 3;
5844 ecc->strength = 1;
5845 return 0;
5846 case NAND_ECC_BCH:
5847 if (!mtd_nand_has_bch()) {
5848 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
5849 return -EINVAL;
5851 ecc->calculate = nand_bch_calculate_ecc;
5852 ecc->correct = nand_bch_correct_data;
5853 ecc->read_page = nand_read_page_swecc;
5854 ecc->read_subpage = nand_read_subpage;
5855 ecc->write_page = nand_write_page_swecc;
5856 ecc->read_page_raw = nand_read_page_raw;
5857 ecc->write_page_raw = nand_write_page_raw;
5858 ecc->read_oob = nand_read_oob_std;
5859 ecc->write_oob = nand_write_oob_std;
5862 * Board driver should supply ecc.size and ecc.strength
5863 * values to select how many bits are correctable.
5864 * Otherwise, default to 4 bits for large page devices.
5866 if (!ecc->size && (mtd->oobsize >= 64)) {
5867 ecc->size = 512;
5868 ecc->strength = 4;
5872 * If no ECC placement scheme was provided, pick the default
5873 * large page one.
5875 if (!mtd->ooblayout) {
5876 /* handle large page devices only */
5877 if (mtd->oobsize < 64) {
5878 WARN(1, "OOB layout is required when using software BCH on small pages\n");
5879 return -EINVAL;
5882 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
5887 * We can only maximize ECC config when the default layout is
5888 * used, otherwise we don't know how many bytes can really be
5889 * used.
5891 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
5892 ecc->options & NAND_ECC_MAXIMIZE) {
5893 int steps, bytes;
5895 /* Always prefer 1k blocks over 512 byte ones */
5896 ecc->size = 1024;
5897 steps = mtd->writesize / ecc->size;
5899 /* Reserve 2 bytes for the BBM */
5900 bytes = (mtd->oobsize - 2) / steps;
5901 ecc->strength = bytes * 8 / fls(8 * ecc->size);
5904 /* See nand_bch_init() for details. */
5905 ecc->bytes = 0;
5906 ecc->priv = nand_bch_init(mtd);
5907 if (!ecc->priv) {
5908 WARN(1, "BCH ECC initialization failed!\n");
5909 return -EINVAL;
5911 return 0;
5912 default:
5913 WARN(1, "Unsupported ECC algorithm!\n");
5914 return -EINVAL;
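/*
 * Example for the NAND_ECC_MAXIMIZE path above: on a 2048 byte page with
 * 64 bytes of OOB, steps = 2, bytes = (64 - 2) / 2 = 31 and
 * fls(8 * 1024) = 14, so ecc->strength = 31 * 8 / 14 = 17 correctable
 * bits per 1024 byte step.
 */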
5919 * nand_check_ecc_caps - check the sanity of preset ECC settings
5920 * @chip: nand chip info structure
5921 * @caps: ECC caps info structure
5922 * @oobavail: OOB size that the ECC engine can use
5924 * When ECC step size and strength are already set, check if they are supported
5925 * by the controller and the calculated ECC bytes fit within the chip's OOB.
5926 * On success, the calculated number of ECC bytes is set.
5928 int nand_check_ecc_caps(struct nand_chip *chip,
5929 const struct nand_ecc_caps *caps, int oobavail)
5931 struct mtd_info *mtd = nand_to_mtd(chip);
5932 const struct nand_ecc_step_info *stepinfo;
5933 int preset_step = chip->ecc.size;
5934 int preset_strength = chip->ecc.strength;
5935 int nsteps, ecc_bytes;
5936 int i, j;
5938 if (WARN_ON(oobavail < 0))
5939 return -EINVAL;
5941 if (!preset_step || !preset_strength)
5942 return -ENODATA;
5944 nsteps = mtd->writesize / preset_step;
5946 for (i = 0; i < caps->nstepinfos; i++) {
5947 stepinfo = &caps->stepinfos[i];
5949 if (stepinfo->stepsize != preset_step)
5950 continue;
5952 for (j = 0; j < stepinfo->nstrengths; j++) {
5953 if (stepinfo->strengths[j] != preset_strength)
5954 continue;
5956 ecc_bytes = caps->calc_ecc_bytes(preset_step,
5957 preset_strength);
5958 if (WARN_ON_ONCE(ecc_bytes < 0))
5959 return ecc_bytes;
5961 if (ecc_bytes * nsteps > oobavail) {
5962 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5963 preset_step, preset_strength);
5964 return -ENOSPC;
5967 chip->ecc.bytes = ecc_bytes;
5969 return 0;
5973 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5974 preset_step, preset_strength);
5976 return -ENOTSUPP;
5978 EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
5981 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5982 * @chip: nand chip info structure
5983 * @caps: ECC engine caps info structure
5984 * @oobavail: OOB size that the ECC engine can use
5986 * If a chip's ECC requirement is provided, try to meet it with the least
5987 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5988 * On success, the chosen ECC settings are set.
5990 int nand_match_ecc_req(struct nand_chip *chip,
5991 const struct nand_ecc_caps *caps, int oobavail)
5993 struct mtd_info *mtd = nand_to_mtd(chip);
5994 const struct nand_ecc_step_info *stepinfo;
5995 int req_step = chip->ecc_step_ds;
5996 int req_strength = chip->ecc_strength_ds;
5997 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5998 int best_step, best_strength, best_ecc_bytes;
5999 int best_ecc_bytes_total = INT_MAX;
6000 int i, j;
6002 if (WARN_ON(oobavail < 0))
6003 return -EINVAL;
6005 /* No information provided by the NAND chip */
6006 if (!req_step || !req_strength)
6007 return -ENOTSUPP;
6009 /* number of correctable bits the chip requires in a page */
6010 req_corr = mtd->writesize / req_step * req_strength;
6012 for (i = 0; i < caps->nstepinfos; i++) {
6013 stepinfo = &caps->stepinfos[i];
6014 step_size = stepinfo->stepsize;
6016 for (j = 0; j < stepinfo->nstrengths; j++) {
6017 strength = stepinfo->strengths[j];
6020 * If both step size and strength are smaller than the
6021 * chip's requirement, it is not easy to compare the
6022 * resulting reliability.
6024 if (step_size < req_step && strength < req_strength)
6025 continue;
6027 if (mtd->writesize % step_size)
6028 continue;
6030 nsteps = mtd->writesize / step_size;
6032 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
6033 if (WARN_ON_ONCE(ecc_bytes < 0))
6034 continue;
6035 ecc_bytes_total = ecc_bytes * nsteps;
6037 if (ecc_bytes_total > oobavail ||
6038 strength * nsteps < req_corr)
6039 continue;
6042 * We assume the best is to meet the chip's requirement
6043 * with the least number of ECC bytes.
6045 if (ecc_bytes_total < best_ecc_bytes_total) {
6046 best_ecc_bytes_total = ecc_bytes_total;
6047 best_step = step_size;
6048 best_strength = strength;
6049 best_ecc_bytes = ecc_bytes;
6054 if (best_ecc_bytes_total == INT_MAX)
6055 return -ENOTSUPP;
6057 chip->ecc.size = best_step;
6058 chip->ecc.strength = best_strength;
6059 chip->ecc.bytes = best_ecc_bytes;
6061 return 0;
6063 EXPORT_SYMBOL_GPL(nand_match_ecc_req);
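/*
 * Example: a chip requiring 4 bits per 512 bytes on a 2048 byte page
 * needs req_corr = 2048 / 512 * 4 = 16 correctable bits per page. Any
 * candidate (step_size, strength) pair must satisfy
 * strength * (2048 / step_size) >= 16 and fit into oobavail; among those,
 * the pair with the smallest total ECC byte count is chosen.
 */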
6066 * nand_maximize_ecc - choose the max ECC strength available
6067 * @chip: nand chip info structure
6068 * @caps: ECC engine caps info structure
6069 * @oobavail: OOB size that the ECC engine can use
6071 * Choose the max ECC strength that is supported on the controller, and can fit
6072 * within the chip's OOB. On success, the chosen ECC settings are set.
6074 int nand_maximize_ecc(struct nand_chip *chip,
6075 const struct nand_ecc_caps *caps, int oobavail)
6077 struct mtd_info *mtd = nand_to_mtd(chip);
6078 const struct nand_ecc_step_info *stepinfo;
6079 int step_size, strength, nsteps, ecc_bytes, corr;
6080 int best_corr = 0;
6081 int best_step = 0;
6082 int best_strength, best_ecc_bytes;
6083 int i, j;
6085 if (WARN_ON(oobavail < 0))
6086 return -EINVAL;
6088 for (i = 0; i < caps->nstepinfos; i++) {
6089 stepinfo = &caps->stepinfos[i];
6090 step_size = stepinfo->stepsize;
6092 /* If chip->ecc.size is already set, respect it */
6093 if (chip->ecc.size && step_size != chip->ecc.size)
6094 continue;
6096 for (j = 0; j < stepinfo->nstrengths; j++) {
6097 strength = stepinfo->strengths[j];
6099 if (mtd->writesize % step_size)
6100 continue;
6102 nsteps = mtd->writesize / step_size;
6104 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
6105 if (WARN_ON_ONCE(ecc_bytes < 0))
6106 continue;
6108 if (ecc_bytes * nsteps > oobavail)
6109 continue;
6111 corr = strength * nsteps;
6114 * If the number of correctable bits is the same,
6115 * a bigger step_size gives better reliability.
6117 if (corr > best_corr ||
6118 (corr == best_corr && step_size > best_step)) {
6119 best_corr = corr;
6120 best_step = step_size;
6121 best_strength = strength;
6122 best_ecc_bytes = ecc_bytes;
6127 if (!best_corr)
6128 return -ENOTSUPP;
6130 chip->ecc.size = best_step;
6131 chip->ecc.strength = best_strength;
6132 chip->ecc.bytes = best_ecc_bytes;
6134 return 0;
6136 EXPORT_SYMBOL_GPL(nand_maximize_ecc);
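/*
 * A controller driver typically selects between the three helpers above
 * along these lines (sketch only; everything prefixed "my_" is
 * hypothetical, the struct fields match those used above):
 *
 *	static const int my_strengths[] = { 1, 4, 8 };
 *	static const struct nand_ecc_step_info my_stepinfo = {
 *		.stepsize = 512,
 *		.strengths = my_strengths,
 *		.nstrengths = ARRAY_SIZE(my_strengths),
 *	};
 *	static const struct nand_ecc_caps my_caps = {
 *		.stepinfos = &my_stepinfo,
 *		.nstepinfos = 1,
 *		.calc_ecc_bytes = my_calc_ecc_bytes,
 *	};
 *
 *	if (chip->ecc.size && chip->ecc.strength)
 *		ret = nand_check_ecc_caps(chip, &my_caps, oobavail);
 *	else if (chip->ecc.options & NAND_ECC_MAXIMIZE)
 *		ret = nand_maximize_ecc(chip, &my_caps, oobavail);
 *	else
 *		ret = nand_match_ecc_req(chip, &my_caps, oobavail);
 */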
6139 * Check if the chip configuration meets the datasheet requirements.
6141 * If our configuration corrects A bits per B bytes and the minimum
6142 * required correction level is X bits per Y bytes, then we must ensure
6143 * both of the following are true:
6145 * (1) A / B >= X / Y
6146 * (2) A >= X
6148 * Requirement (1) ensures we can correct for the required bitflip density.
6149 * Requirement (2) ensures we can correct even when all bitflips are clumped
6150 * in the same sector.
6152 static bool nand_ecc_strength_good(struct mtd_info *mtd)
6154 struct nand_chip *chip = mtd_to_nand(mtd);
6155 struct nand_ecc_ctrl *ecc = &chip->ecc;
6156 int corr, ds_corr;
6158 if (ecc->size == 0 || chip->ecc_step_ds == 0)
6159 /* Not enough information */
6160 return true;
6163 * We compute the number of correctable bits per page to compare
6164 * the correction density.
6166 corr = (mtd->writesize * ecc->strength) / ecc->size;
6167 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
6169 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
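/*
 * Example: a chip requiring 4 bits per 512 bytes (X/Y) that is driven
 * with an 8 bit per 1024 byte configuration (A/B) passes both checks on
 * a 2048 byte page: corr = 2048 * 8 / 1024 = 16 >= ds_corr =
 * 2048 * 4 / 512 = 16, and strength 8 >= 4.
 */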
6173 * nand_scan_tail - [NAND Interface] Scan for the NAND device
6174 * @mtd: MTD device structure
6176 * This is the second phase of the normal nand_scan() function. It fills out
6177 * all the uninitialized function pointers with the defaults and scans for a
6178 * bad block table if appropriate.
6180 int nand_scan_tail(struct mtd_info *mtd)
6182 struct nand_chip *chip = mtd_to_nand(mtd);
6183 struct nand_ecc_ctrl *ecc = &chip->ecc;
6184 int ret, i;
6186 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
6187 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
6188 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
6189 return -EINVAL;
6192 chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
6193 if (!chip->data_buf)
6194 return -ENOMEM;
6197 * FIXME: some NAND manufacturer drivers expect the first die to be
6198 * selected when manufacturer->init() is called. They should be fixed
6199 * to explicitly select the relevant die when interacting with the NAND
6200 * chip.
6202 chip->select_chip(mtd, 0);
6203 ret = nand_manufacturer_init(chip);
6204 chip->select_chip(mtd, -1);
6205 if (ret)
6206 goto err_free_buf;
6208 /* Set the internal oob buffer location, just after the page data */
6209 chip->oob_poi = chip->data_buf + mtd->writesize;
6212 * If no default placement scheme is given, select an appropriate one.
6214 if (!mtd->ooblayout &&
6215 !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
6216 switch (mtd->oobsize) {
6217 case 8:
6218 case 16:
6219 mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
6220 break;
6221 case 64:
6222 case 128:
6223 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
6224 break;
6225 default:
6227 * Expose the whole OOB area to users if ECC_NONE
6228 * is passed. We could do that for all kinds of
6229 * ->oobsize, but we must keep the old large/small
6230 * page with ECC layout when ->oobsize <= 128 for
6231 * compatibility reasons.
6233 if (ecc->mode == NAND_ECC_NONE) {
6234 mtd_set_ooblayout(mtd,
6235 &nand_ooblayout_lp_ops);
6236 break;
6239 WARN(1, "No oob scheme defined for oobsize %d\n",
6240 mtd->oobsize);
6241 ret = -EINVAL;
6242 goto err_nand_manuf_cleanup;
6247 * Check the ECC mode. If 3-byte/512-byte hardware ECC was selected
6248 * but the page size is only 256 bytes, fall back to software ECC.
6251 switch (ecc->mode) {
6252 case NAND_ECC_HW_OOB_FIRST:
6253 /* Similar to NAND_ECC_HW, but with a separate read_page handler */
6254 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
6255 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
6256 ret = -EINVAL;
6257 goto err_nand_manuf_cleanup;
6259 if (!ecc->read_page)
6260 ecc->read_page = nand_read_page_hwecc_oob_first;
6262 case NAND_ECC_HW:
6263 /* Use standard hwecc read page function? */
6264 if (!ecc->read_page)
6265 ecc->read_page = nand_read_page_hwecc;
6266 if (!ecc->write_page)
6267 ecc->write_page = nand_write_page_hwecc;
6268 if (!ecc->read_page_raw)
6269 ecc->read_page_raw = nand_read_page_raw;
6270 if (!ecc->write_page_raw)
6271 ecc->write_page_raw = nand_write_page_raw;
6272 if (!ecc->read_oob)
6273 ecc->read_oob = nand_read_oob_std;
6274 if (!ecc->write_oob)
6275 ecc->write_oob = nand_write_oob_std;
6276 if (!ecc->read_subpage)
6277 ecc->read_subpage = nand_read_subpage;
6278 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
6279 ecc->write_subpage = nand_write_subpage_hwecc;
6281 case NAND_ECC_HW_SYNDROME:
6282 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
6283 (!ecc->read_page ||
6284 ecc->read_page == nand_read_page_hwecc ||
6285 !ecc->write_page ||
6286 ecc->write_page == nand_write_page_hwecc)) {
6287 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
6288 ret = -EINVAL;
6289 goto err_nand_manuf_cleanup;
6291 /* Use standard syndrome read/write page function? */
6292 if (!ecc->read_page)
6293 ecc->read_page = nand_read_page_syndrome;
6294 if (!ecc->write_page)
6295 ecc->write_page = nand_write_page_syndrome;
6296 if (!ecc->read_page_raw)
6297 ecc->read_page_raw = nand_read_page_raw_syndrome;
6298 if (!ecc->write_page_raw)
6299 ecc->write_page_raw = nand_write_page_raw_syndrome;
6300 if (!ecc->read_oob)
6301 ecc->read_oob = nand_read_oob_syndrome;
6302 if (!ecc->write_oob)
6303 ecc->write_oob = nand_write_oob_syndrome;
6305 if (mtd->writesize >= ecc->size) {
6306 if (!ecc->strength) {
6307 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
6308 ret = -EINVAL;
6309 goto err_nand_manuf_cleanup;
6311 break;
6313 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
6314 ecc->size, mtd->writesize);
6315 ecc->mode = NAND_ECC_SOFT;
6316 ecc->algo = NAND_ECC_HAMMING;
6318 case NAND_ECC_SOFT:
6319 ret = nand_set_ecc_soft_ops(mtd);
6320 if (ret) {
6321 ret = -EINVAL;
6322 goto err_nand_manuf_cleanup;
6324 break;
6326 case NAND_ECC_ON_DIE:
6327 if (!ecc->read_page || !ecc->write_page) {
6328 WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
6329 ret = -EINVAL;
6330 goto err_nand_manuf_cleanup;
6332 if (!ecc->read_oob)
6333 ecc->read_oob = nand_read_oob_std;
6334 if (!ecc->write_oob)
6335 ecc->write_oob = nand_write_oob_std;
6336 break;
6338 case NAND_ECC_NONE:
6339 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
6340 ecc->read_page = nand_read_page_raw;
6341 ecc->write_page = nand_write_page_raw;
6342 ecc->read_oob = nand_read_oob_std;
6343 ecc->read_page_raw = nand_read_page_raw;
6344 ecc->write_page_raw = nand_write_page_raw;
6345 ecc->write_oob = nand_write_oob_std;
6346 ecc->size = mtd->writesize;
6347 ecc->bytes = 0;
6348 ecc->strength = 0;
6349 break;
6351 default:
6352 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
6353 ret = -EINVAL;
6354 goto err_nand_manuf_cleanup;
6357 if (ecc->correct || ecc->calculate) {
6358 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6359 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6360 if (!ecc->calc_buf || !ecc->code_buf) {
6361 ret = -ENOMEM;
6362 goto err_nand_manuf_cleanup;
6366 /* For many systems, the standard OOB read/write also works for raw */
6367 if (!ecc->read_oob_raw)
6368 ecc->read_oob_raw = ecc->read_oob;
6369 if (!ecc->write_oob_raw)
6370 ecc->write_oob_raw = ecc->write_oob;
6372 /* propagate ecc info to mtd_info */
6373 mtd->ecc_strength = ecc->strength;
6374 mtd->ecc_step_size = ecc->size;
6377 * Set the number of read / write steps for one page depending on ECC
6378 * mode.
6380 ecc->steps = mtd->writesize / ecc->size;
6381 if (ecc->steps * ecc->size != mtd->writesize) {
6382 WARN(1, "Invalid ECC parameters\n");
6383 ret = -EINVAL;
6384 goto err_nand_manuf_cleanup;
6386 ecc->total = ecc->steps * ecc->bytes;
6387 if (ecc->total > mtd->oobsize) {
6388 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
6389 ret = -EINVAL;
6390 goto err_nand_manuf_cleanup;
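/*
 * Illustration: a 2048-byte page with ecc->size = 512 and ecc->bytes = 7
 * (typical for 4-bit BCH over 512 bytes) yields ecc->steps = 4 and
 * ecc->total = 28 ECC bytes, which fits in a standard 64-byte OOB area.
 */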
6394 * The number of bytes available for a client to place data into
6395 * the out of band area.
6397 ret = mtd_ooblayout_count_freebytes(mtd);
6398 if (ret < 0)
6399 ret = 0;
6401 mtd->oobavail = ret;
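/*
 * Illustration: with a 2048+64 byte page and the large-page Hamming
 * layout (24 ECC bytes plus 2 bytes reserved for the bad block marker),
 * mtd_ooblayout_count_freebytes() typically reports 38 bytes, so clients
 * such as file systems see mtd->oobavail = 38.
 */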
6403 /* ECC sanity check: warn if it's too weak */
6404 if (!nand_ecc_strength_good(mtd))
6405 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
6406 mtd->name);
6408 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
6409 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
6410 switch (ecc->steps) {
6411 case 2:
6412 mtd->subpage_sft = 1;
6413 break;
6414 case 4:
6415 case 8:
6416 case 16:
6417 mtd->subpage_sft = 2;
6418 break;
6421 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
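/*
 * Illustration: a 2048-byte page split into four 512-byte ECC steps gets
 * subpage_sft = 2, so chip->subpagesize = 2048 >> 2 = 512 bytes.
 */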
6423 /* Initialize state */
6424 chip->state = FL_READY;
6426 /* Invalidate the pagebuffer reference */
6427 chip->pagebuf = -1;
6429 /* Large page NAND with SOFT_ECC should support subpage reads */
6430 switch (ecc->mode) {
6431 case NAND_ECC_SOFT:
6432 if (chip->page_shift > 9)
6433 chip->options |= NAND_SUBPAGE_READ;
6434 break;
6436 default:
6437 break;
6440 /* Fill in remaining MTD driver data */
6441 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
6442 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
6443 MTD_CAP_NANDFLASH;
6444 mtd->_erase = nand_erase;
6445 mtd->_point = NULL;
6446 mtd->_unpoint = NULL;
6447 mtd->_panic_write = panic_nand_write;
6448 mtd->_read_oob = nand_read_oob;
6449 mtd->_write_oob = nand_write_oob;
6450 mtd->_sync = nand_sync;
6451 mtd->_lock = NULL;
6452 mtd->_unlock = NULL;
6453 mtd->_suspend = nand_suspend;
6454 mtd->_resume = nand_resume;
6455 mtd->_reboot = nand_shutdown;
6456 mtd->_block_isreserved = nand_block_isreserved;
6457 mtd->_block_isbad = nand_block_isbad;
6458 mtd->_block_markbad = nand_block_markbad;
6459 mtd->_max_bad_blocks = nand_max_bad_blocks;
6460 mtd->writebufsize = mtd->writesize;
6463 * Initialize bitflip_threshold to its default before scan_bbt() is called.
6464 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
6465 * properly set.
6467 if (!mtd->bitflip_threshold)
6468 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
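/*
 * Illustration: with mtd->ecc_strength = 8 the default threshold becomes
 * DIV_ROUND_UP(24, 4) = 6, i.e. the MTD core returns -EUCLEAN once a read
 * reports six or more corrected bitflips in a single ECC step.
 */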
6470 /* Initialize the ->data_interface field. */
6471 ret = nand_init_data_interface(chip);
6472 if (ret)
6473 goto err_nand_manuf_cleanup;
6475 /* Enter fastest possible mode on all dies. */
6476 for (i = 0; i < chip->numchips; i++) {
6477 chip->select_chip(mtd, i);
6478 ret = nand_setup_data_interface(chip, i);
6479 chip->select_chip(mtd, -1);
6481 if (ret)
6482 goto err_nand_manuf_cleanup;
6486 /* Check if we should skip the bad block table scan */
6486 if (chip->options & NAND_SKIP_BBTSCAN)
6487 return 0;
6489 /* Build bad block table */
6490 ret = chip->scan_bbt(mtd);
6491 if (ret)
6492 goto err_nand_manuf_cleanup;
6494 return 0;
6497 err_nand_manuf_cleanup:
6498 nand_manufacturer_cleanup(chip);
6500 err_free_buf:
6501 kfree(chip->data_buf);
6502 kfree(ecc->code_buf);
6503 kfree(ecc->calc_buf);
6505 return ret;
6507 EXPORT_SYMBOL(nand_scan_tail);
6510 * is_module_text_address() isn't exported, and it's mostly a pointless
6511 * test if this is a module _anyway_ -- they'd have to try _really_ hard
6512 * to call us from in-kernel code if the core NAND support is modular.
6514 #ifdef MODULE
6515 #define caller_is_module() (1)
6516 #else
6517 #define caller_is_module() \
6518 is_module_text_address((unsigned long)__builtin_return_address(0))
6519 #endif
6522 * nand_scan - [NAND Interface] Scan for the NAND device
6523 * @mtd: MTD device structure
6524 * @maxchips: number of chips to scan for
6526 * This fills out all the uninitialized function pointers with the defaults.
6527 * The flash ID is read and the mtd/chip structures are filled with the
6528 * appropriate values.
6530 int nand_scan(struct mtd_info *mtd, int maxchips)
6532 int ret;
6534 ret = nand_scan_ident(mtd, maxchips, NULL);
6535 if (!ret)
6536 ret = nand_scan_tail(mtd);
6537 return ret;
6539 EXPORT_SYMBOL(nand_scan);
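/*
 * A minimal board-driver sketch of the intended call sequence around
 * nand_scan().  The foo_* names, the MMIO base and the command-control
 * helper are hypothetical; only nand_to_mtd(), nand_scan(),
 * mtd_device_register() and nand_cleanup() are interfaces provided by
 * the MTD/NAND core.
 *
 *	static int foo_nand_probe(struct platform_device *pdev)
 *	{
 *		struct nand_chip *chip;
 *		struct mtd_info *mtd;
 *		int ret;
 *
 *		chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
 *		if (!chip)
 *			return -ENOMEM;
 *
 *		mtd = nand_to_mtd(chip);
 *		mtd->dev.parent = &pdev->dev;
 *
 *		chip->IO_ADDR_R = foo_io_base;
 *		chip->IO_ADDR_W = foo_io_base;
 *		chip->cmd_ctrl = foo_cmd_ctrl;
 *		chip->chip_delay = 20;
 *		chip->ecc.mode = NAND_ECC_SOFT;
 *		chip->ecc.algo = NAND_ECC_HAMMING;
 *
 *		ret = nand_scan(mtd, 1);
 *		if (ret)
 *			return ret;
 *
 *		platform_set_drvdata(pdev, chip);
 *
 *		ret = mtd_device_register(mtd, NULL, 0);
 *		if (ret)
 *			nand_cleanup(chip);
 *
 *		return ret;
 *	}
 */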
6542 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
6543 * @chip: NAND chip object
6545 void nand_cleanup(struct nand_chip *chip)
6547 if (chip->ecc.mode == NAND_ECC_SOFT &&
6548 chip->ecc.algo == NAND_ECC_BCH)
6549 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
6551 /* Free bad block table memory */
6552 kfree(chip->bbt);
6553 kfree(chip->data_buf);
6554 kfree(chip->ecc.code_buf);
6555 kfree(chip->ecc.calc_buf);
6557 /* Free bad block descriptor memory */
6558 if (chip->badblock_pattern && chip->badblock_pattern->options
6559 & NAND_BBT_DYNAMICSTRUCT)
6560 kfree(chip->badblock_pattern);
6562 /* Free manufacturer priv data. */
6563 nand_manufacturer_cleanup(chip);
6565 EXPORT_SYMBOL_GPL(nand_cleanup);
6568 * nand_release - [NAND Interface] Unregister the MTD device and free resources
6569 * held by the NAND device
6570 * @mtd: MTD device structure
6572 void nand_release(struct mtd_info *mtd)
6574 mtd_device_unregister(mtd);
6575 nand_cleanup(mtd_to_nand(mtd));
6577 EXPORT_SYMBOL_GPL(nand_release);
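/*
 * Matching teardown sketch for the hypothetical probe example shown after
 * nand_scan() above: once the MTD device has been registered, the remove
 * path only needs nand_release(), which unregisters the device and then
 * performs the nand_cleanup() step for it.
 *
 *	static int foo_nand_remove(struct platform_device *pdev)
 *	{
 *		struct nand_chip *chip = platform_get_drvdata(pdev);
 *
 *		nand_release(nand_to_mtd(chip));
 *		return 0;
 *	}
 */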
6579 MODULE_LICENSE("GPL");
6580 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
6581 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
6582 MODULE_DESCRIPTION("Generic NAND flash driver code");