/*
 * This is the generic MTD driver for NAND flash devices. It should be
 * capable of working with almost all NAND chips currently available.
 *
 * Additional technical information is available on
 * http://www.linux-mtd.infradead.org/doc/nand.html
 *
 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 * Credits:
 *	David Woodhouse for adding multichip support
 *
 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *	rework for 2K page size chips
 *
 * TODO:
 *	Enable cached programming for 2k page size chips
 *	Check, if mtd->ecctype should be set to MTD_ECC_HW
 *	if we have HW ECC support.
 *	BBT table is not serialized, has to be fixed
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/nmi.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mtd/partitions.h>
static int nand_get_device(struct mtd_info *mtd, int new_state);

static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops);
/* Define default oob placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		if (mtd->oobsize == 16)
			oobregion->length = 4;
		else
			oobregion->length = 3;
	} else {
		if (mtd->oobsize == 8)
			return -ERANGE;

		oobregion->offset = 6;
		oobregion->length = ecc->total - 4;
	}

	return 0;
}
static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (mtd->oobsize == 16) {
		if (section)
			return -ERANGE;

		oobregion->length = 8;
		oobregion->offset = 8;
	} else {
		oobregion->length = 2;
		if (!section)
			oobregion->offset = 3;
		else
			oobregion->offset = 6;
	}

	return 0;
}
const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section || !ecc->total)
		return -ERANGE;

	oobregion->length = ecc->total;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}
static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = mtd->oobsize - ecc->total - 2;
	oobregion->offset = 2;

	return 0;
}
const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
/*
 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
 * bytes are placed at a fixed offset.
 */
static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		oobregion->offset = 40;
		break;
	case 128:
		oobregion->offset = 80;
		break;
	default:
		return -EINVAL;
	}

	oobregion->length = ecc->total;
	if (oobregion->offset + oobregion->length > mtd->oobsize)
		return -ERANGE;

	return 0;
}
static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ecc_offset = 0;

	if (section < 0 || section > 1)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		ecc_offset = 40;
		break;
	case 128:
		ecc_offset = 80;
		break;
	default:
		return -EINVAL;
	}

	if (section == 0) {
		oobregion->offset = 2;
		oobregion->length = ecc_offset - 2;
	} else {
		oobregion->offset = ecc_offset + ecc->total;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}

	return 0;
}
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
static int check_offs_len(struct mtd_info *mtd,
			  loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	/* Start address must align on block boundary */
	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	/* Length must align on block boundary */
	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}
/**
 * nand_release_device - [GENERIC] release chip
 * @mtd: MTD device structure
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Release the controller and the chip */
	spin_lock(&chip->controller->lock);
	chip->controller->active = NULL;
	chip->state = FL_READY;
	wake_up(&chip->controller->wq);
	spin_unlock(&chip->controller->lock);
}
/**
 * nand_read_byte - [DEFAULT] read one byte from the chip
 * @mtd: MTD device structure
 *
 * Default read function for 8bit buswidth
 */
static uint8_t nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	return readb(chip->IO_ADDR_R);
}
/**
 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
 * @mtd: MTD device structure
 *
 * Default read function for 16bit buswidth with endianness conversion.
 */
static uint8_t nand_read_byte16(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
}
/**
 * nand_read_word - [DEFAULT] read one word from the chip
 * @mtd: MTD device structure
 *
 * Default read function for 16bit buswidth without endianness conversion.
 */
static u16 nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	return readw(chip->IO_ADDR_R);
}
/**
 * nand_select_chip - [DEFAULT] control CE line
 * @mtd: MTD device structure
 * @chipnr: chipnumber to select, -1 for deselect
 *
 * Default select function for 1 chip devices.
 */
static void nand_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	switch (chipnr) {
	case -1:
		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
		break;
	case 0:
		break;

	default:
		BUG();
	}
}
/**
 * nand_write_byte - [DEFAULT] write single byte to chip
 * @mtd: MTD device structure
 * @byte: value to write
 *
 * Default function to write a byte to I/O[7:0]
 */
static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	chip->write_buf(mtd, &byte, 1);
}
/**
 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
 * @mtd: MTD device structure
 * @byte: value to write
 *
 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
 */
static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	uint16_t word = byte;

	/*
	 * It's not entirely clear what should happen to I/O[15:8] when writing
	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
	 *
	 *	When the host supports a 16-bit bus width, only data is
	 *	transferred at the 16-bit width. All address and command line
	 *	transfers shall use only the lower 8-bits of the data bus. During
	 *	command transfers, the host may place any value on the upper
	 *	8-bits of the data bus. During address transfers, the host shall
	 *	set the upper 8-bits of the data bus to 00h.
	 *
	 * One user of the write_byte callback is nand_set_features. The
	 * four parameters are specified to be written to I/O[7:0], but this is
	 * neither an address nor a command transfer. Let's assume a 0 on the
	 * upper I/O lines is OK.
	 */
	chip->write_buf(mtd, (uint8_t *)&word, 2);
}
/**
 * nand_write_buf - [DEFAULT] write buffer to chip
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Default write function for 8bit buswidth.
 */
static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	iowrite8_rep(chip->IO_ADDR_W, buf, len);
}
/**
 * nand_read_buf - [DEFAULT] read chip data into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Default read function for 8bit buswidth.
 */
static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	ioread8_rep(chip->IO_ADDR_R, buf, len);
}
/**
 * nand_write_buf16 - [DEFAULT] write buffer to chip
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Default write function for 16bit buswidth.
 */
static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u16 *p = (u16 *) buf;

	iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
}
/**
 * nand_read_buf16 - [DEFAULT] read chip data into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Default read function for 16bit buswidth.
 */
static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u16 *p = (u16 *) buf;

	ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
}
/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is bad.
 */
static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	int page, page_end, res;
	struct nand_chip *chip = mtd_to_nand(mtd);
	u8 bad;

	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);

	for (; page < page_end; page++) {
		res = chip->ecc.read_oob(mtd, chip, page);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;
	}

	return 0;
}
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, i = 0;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/last page(s) if necessary */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	do {
		res = nand_do_write_oob(mtd, ofs, &ops);
		if (!ret)
			ret = res;

		i++;
		ofs += mtd->writesize;
	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);

	return ret;
}
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(mtd, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(mtd, FL_WRITING);
		ret = chip->block_markbad(mtd, ofs);
		nand_release_device(mtd);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(mtd, ofs);
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @mtd: MTD device structure
 *
 * Check if the device is write protected. The function expects that the
 * device is already selected.
 */
static int nand_check_wp(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status & NAND_STATUS_WP ? 0 : 1;
}
/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is marked as reserved.
 */
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->bbt)
		return 0;
	/* Return info from the table */
	return nand_isreserved_bbt(mtd, ofs);
}
/**
 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 * @allowbbt: 1, if it is allowed to access the bbt area
 *
 * Check if the block is bad, either by reading the bad block table or
 * by calling the scan function.
 */
static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->bbt)
		return chip->block_bad(mtd, ofs);

	/* Return info from the table */
	return nand_isbad_bbt(mtd, ofs, allowbbt);
}
/**
 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
 * @mtd: MTD device structure
 * @timeo: Timeout
 *
 * Helper function for nand_wait_ready used when needing to wait in interrupt
 * context.
 */
static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int i;

	/* Wait for the device to get ready */
	for (i = 0; i < timeo; i++) {
		if (chip->dev_ready(mtd))
			break;
		touch_softlockup_watchdog();
		mdelay(1);
	}
}
/**
 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
 * @mtd: MTD device structure
 *
 * Wait for the ready pin after a command, and warn if a timeout occurs.
 */
void nand_wait_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	unsigned long timeo = 400;

	if (in_interrupt() || oops_in_progress)
		return panic_nand_wait_ready(mtd, timeo);

	/* Wait until command is processed or timeout occurs */
	timeo = jiffies + msecs_to_jiffies(timeo);
	do {
		if (chip->dev_ready(mtd))
			return;
		cond_resched();
	} while (time_before(jiffies, timeo));

	if (!chip->dev_ready(mtd))
		pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
 * @mtd: MTD device structure
 * @timeo: Timeout in ms
 *
 * Wait for status ready (i.e. command done) or timeout.
 */
static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	timeo = jiffies + msecs_to_jiffies(timeo);
	do {
		u8 status;

		ret = nand_read_data_op(chip, &status, sizeof(status), true);
		if (ret)
			return;

		if (status & NAND_STATUS_READY)
			break;
		touch_softlockup_watchdog();
	} while (time_before(jiffies, timeo));
}
/**
 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
 * @chip: NAND chip structure
 * @timeout_ms: Timeout in ms
 *
 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 * If that does not happen within the specified timeout, -ETIMEDOUT is
 * returned.
 *
 * This helper is intended to be used when the controller does not have access
 * to the NAND R/B pin.
 *
 * Be aware that calling this helper from an ->exec_op() implementation means
 * ->exec_op() must be re-entrant.
 *
 * Return 0 if the NAND chip is ready, a negative error otherwise.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
	const struct nand_sdr_timings *timings;
	u8 status = 0;
	int ret;

	if (!chip->exec_op)
		return -ENOTSUPP;

	/* Wait tWB before polling the STATUS reg. */
	timings = nand_get_sdr_timings(&chip->data_interface);
	ndelay(PSEC_TO_NSEC(timings->tWB_max));

	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		ret = nand_read_data_op(chip, &status, sizeof(status), true);
		if (ret)
			break;

		if (status & NAND_STATUS_READY)
			break;

		/*
		 * Typical lowest execution time for a tR on most NANDs is 10us,
		 * use this as polling delay before doing something smarter (ie.
		 * deriving a delay from the timeout value, timeout_ms/ratio).
		 */
		udelay(10);
	} while (time_before(jiffies, timeout_ms));

	/*
	 * We have to exit READ_STATUS mode in order to read real data on the
	 * bus in case the WAITRDY instruction is preceding a DATA_IN
	 * instruction.
	 */
	nand_exit_status_op(chip);

	if (ret)
		return ret;

	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
/**
 * nand_command - [DEFAULT] Send command to NAND device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This function is used for small page devices
 * (512 Bytes per page).
 */
static void nand_command(struct mtd_info *mtd, unsigned int command,
			 int column, int page_addr)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;

	/* Write out the command to the device */
	if (command == NAND_CMD_SEQIN) {
		int readcmd;

		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		} else {
			column -= 256;
			readcmd = NAND_CMD_READ1;
		}
		chip->cmd_ctrl(mtd, readcmd, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	if (command != NAND_CMD_NONE)
		chip->cmd_ctrl(mtd, command, ctrl);

	/* Address cycle, when necessary */
	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
	/* Serially input address */
	if (column != -1) {
		/* Adjust columns for 16 bit buswidth */
		if (chip->options & NAND_BUSWIDTH_16 &&
		    !nand_opcode_8bits(command))
			column >>= 1;
		chip->cmd_ctrl(mtd, column, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	if (page_addr != -1) {
		chip->cmd_ctrl(mtd, page_addr, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
		if (chip->options & NAND_ROW_ADDR_3)
			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers; status and sequential
	 * in need no delay.
	 */
	switch (command) {
	case NAND_CMD_NONE:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd,
			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

		/* This applies to read commands */
	case NAND_CMD_READ0:
		/*
		 * READ0 is sometimes used to exit GET STATUS mode. When this
		 * is the case no address cycles are requested, and we can use
		 * this information to detect that we should not wait for the
		 * device to be ready.
		 */
		if (column == -1 && page_addr == -1)
			return;

	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay.
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}
	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(mtd);
}
static void nand_ccs_delay(struct nand_chip *chip)
{
	/*
	 * The controller already takes care of waiting for tCCS when the RNDIN
	 * or RNDOUT command is sent, return directly.
	 */
	if (!(chip->options & NAND_WAIT_TCCS))
		return;

	/*
	 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
	 * (which should be safe for all NANDs).
	 */
	if (chip->setup_data_interface)
		ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
	else
		ndelay(500);
}
/**
 * nand_command_lp - [DEFAULT] Send command to NAND large page device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This is the version for the new large page
 * devices. We don't have the separate regions as we have in the small page
 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
 */
static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
			    int column, int page_addr)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);

	/* Emulate NAND_CMD_READOOB */
	if (command == NAND_CMD_READOOB) {
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	}

	/* Command latch cycle */
	if (command != NAND_CMD_NONE)
		chip->cmd_ctrl(mtd, command,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);

	if (column != -1 || page_addr != -1) {
		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;

		/* Serially input address */
		if (column != -1) {
			/* Adjust columns for 16 bit buswidth */
			if (chip->options & NAND_BUSWIDTH_16 &&
			    !nand_opcode_8bits(command))
				column >>= 1;
			chip->cmd_ctrl(mtd, column, ctrl);
			ctrl &= ~NAND_CTRL_CHANGE;

			/* Only output a single addr cycle for 8bits opcodes. */
			if (!nand_opcode_8bits(command))
				chip->cmd_ctrl(mtd, column >> 8, ctrl);
		}
		if (page_addr != -1) {
			chip->cmd_ctrl(mtd, page_addr, ctrl);
			chip->cmd_ctrl(mtd, page_addr >> 8,
				       NAND_NCE | NAND_ALE);
			if (chip->options & NAND_ROW_ADDR_3)
				chip->cmd_ctrl(mtd, page_addr >> 16,
					       NAND_NCE | NAND_ALE);
		}
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers; status, sequential
	 * in and status need no delay.
	 */
	switch (command) {
	case NAND_CMD_NONE:
	case NAND_CMD_CACHEDPROG:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RNDIN:
		nand_ccs_delay(chip);
		return;

	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

	case NAND_CMD_RNDOUT:
		/* No ready / busy check necessary */
		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);

		nand_ccs_delay(chip);
		return;

	case NAND_CMD_READ0:
		/*
		 * READ0 is sometimes used to exit GET STATUS mode. When this
		 * is the case no address cycles are requested, and we can use
		 * this information to detect that READSTART should not be
		 * issued.
		 */
		if (column == -1 && page_addr == -1)
			return;

		chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);

		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay.
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}

	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(mtd);
}
/**
 * panic_nand_get_device - [GENERIC] Get chip for selected access
 * @chip: the nand chip descriptor
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Used when in panic, no locks are taken.
 */
static void panic_nand_get_device(struct nand_chip *chip,
				  struct mtd_info *mtd, int new_state)
{
	/* Hardware controller shared among independent devices */
	chip->controller->active = chip;
	chip->state = new_state;
}
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Get the device and lock it for exclusive access
 */
static int
nand_get_device(struct mtd_info *mtd, int new_state)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	spinlock_t *lock = &chip->controller->lock;
	wait_queue_head_t *wq = &chip->controller->wq;
	DECLARE_WAITQUEUE(wait, current);
retry:
	spin_lock(lock);

	/* Hardware controller shared among independent devices */
	if (!chip->controller->active)
		chip->controller->active = chip;

	if (chip->controller->active == chip && chip->state == FL_READY) {
		chip->state = new_state;
		spin_unlock(lock);
		return 0;
	}
	if (new_state == FL_PM_SUSPENDED) {
		if (chip->controller->active->state == FL_PM_SUSPENDED) {
			chip->state = FL_PM_SUSPENDED;
			spin_unlock(lock);
			return 0;
		}
	}
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(wq, &wait);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
	goto retry;
}
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 */
static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
			    unsigned long timeo)
{
	int i;

	for (i = 0; i < timeo; i++) {
		if (chip->dev_ready) {
			if (chip->dev_ready(mtd))
				break;
		} else {
			int ret;
			u8 status;

			ret = nand_read_data_op(chip, &status, sizeof(status),
						true);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
/**
 * nand_wait - [DEFAULT] wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND chip structure
 *
 * Wait for command done. This applies to erase and program only.
 */
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	unsigned long timeo = 400;
	u8 status;
	int ret;

	/*
	 * Apply this short delay always to ensure that we do wait tWB in any
	 * case on any machine.
	 */
	ndelay(100);

	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	if (in_interrupt() || oops_in_progress)
		panic_nand_wait(mtd, chip, timeo);
	else {
		timeo = jiffies + msecs_to_jiffies(timeo);
		do {
			if (chip->dev_ready) {
				if (chip->dev_ready(mtd))
					break;
			} else {
				ret = nand_read_data_op(chip, &status,
							sizeof(status), true);
				if (ret)
					return ret;

				if (status & NAND_STATUS_READY)
					break;
			}
			cond_resched();
		} while (time_before(jiffies, timeo));
	}

	ret = nand_read_data_op(chip, &status, sizeof(status), true);
	if (ret)
		return ret;

	/* This can happen in case of timeout or buggy dev_ready */
	WARN_ON(!(status & NAND_STATUS_READY));
	return status;
}
static bool nand_supports_get_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.get_feature_list));
}

static bool nand_supports_set_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.set_feature_list));
}
/**
 * nand_get_features - wrapper to perform a GET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four bytes array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
 */
int nand_get_features(struct nand_chip *chip, int addr,
		      u8 *subfeature_param)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (!nand_supports_get_features(chip, addr))
		return -ENOTSUPP;

	return chip->get_features(mtd, chip, addr, subfeature_param);
}
EXPORT_SYMBOL_GPL(nand_get_features);
/**
 * nand_set_features - wrapper to perform a SET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four bytes array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
 */
int nand_set_features(struct nand_chip *chip, int addr,
		      u8 *subfeature_param)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (!nand_supports_set_features(chip, addr))
		return -ENOTSUPP;

	return chip->set_features(mtd, chip, addr, subfeature_param);
}
EXPORT_SYMBOL_GPL(nand_set_features);
/**
 * nand_reset_data_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (!chip->setup_data_interface)
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
	ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
/**
 * nand_setup_data_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Find and configure the best data interface and NAND timings supported by
 * the chip and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
		chip->onfi_timing_mode_default,
	};
	int ret;

	if (!chip->setup_data_interface)
		return 0;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		chip->select_chip(mtd, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		chip->select_chip(mtd, -1);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		return 0;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	chip->select_chip(mtd, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	chip->select_chip(mtd, -1);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->onfi_timing_mode_default) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->onfi_timing_mode_default);
		goto err_reset_chip;
	}

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_data_interface(chip, chipnr);
	chip->select_chip(mtd, chipnr);
	nand_reset_op(chip);
	chip->select_chip(mtd, -1);

	return ret;
}
/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table. After this
 * function nand_chip->data_interface is initialized with the best timing mode
 * available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int modes, mode, ret;

	if (!chip->setup_data_interface)
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	modes = onfi_get_async_timing_mode(chip);
	if (modes == ONFI_TIMING_MODE_UNKNOWN) {
		if (!chip->onfi_timing_mode_default)
			return 0;

		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
		 * controller supports the requested timings.
		 */
		ret = chip->setup_data_interface(mtd,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 &chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	return 0;
}
/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2 cycles.
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (chip->exec_op) {
		if (mtd->writesize > 512)
			return nand_lp_exec_read_page_op(chip, page,
							 offset_in_page, buf,
							 len);

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->read_buf(mtd, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);
/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
				   unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->read_byte(mtd);

	return 0;
}
/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->read_buf(mtd, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
/**
 * nand_read_oob_op - Do a READ OOB operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_oob: offset within the OOB area
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ OOB operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	if (mtd->writesize > 512)
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->read_buf(mtd, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (chip->exec_op)
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->write_buf(mtd, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;
	u8 status;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->waitfunc(mtd, chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int status;

	if (!len || !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (chip->exec_op) {
		status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
						len, true);
	} else {
		chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
		chip->write_buf(mtd, buf, len);
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);
/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->write_buf(mtd, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int i;
	u8 *id = buf;

	if (len && !buf)
		return -EINVAL;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->read_byte(mtd);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->read_byte(mtd);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * This function sends a READ0 command to cancel the effect of the STATUS
 * command to avoid reading only the status until a new read command is sent.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_exit_status_op(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READ0, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_exit_status_op);
/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * This function sends an ERASE command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[3] = { page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    PSEC_TO_MSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);

		ret = chip->waitfunc(mtd, chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);
/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a SET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const u8 *params = data;
	int i, ret;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->write_byte(mtd, params[i]);

	ret = chip->waitfunc(mtd, chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a GET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *params = data;
	int i;

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->read_byte(mtd);

	return 0;
}
/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * This function sends a RESET command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function does a raw data read on the bus. Usually used after launching
 * another NAND operation like nand_read_page_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (!len || !buf)
		return -EINVAL;

	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			p[i] = chip->read_byte(mtd);
	} else {
		chip->read_buf(mtd, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);

/**
 * nand_write_data_op - Write data to the NAND
 * @chip: The NAND chip
 * @buf: buffer containing the data to send on the bus
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function does a raw data write on the bus. Usually used after launching
 * another NAND operation like nand_prog_page_begin_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		const u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			chip->write_byte(mtd, p[i]);
	} else {
		chip->write_buf(mtd, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);

/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};
2389 * nand_op_parser_must_split_instr - Checks if an instruction must be split
2390 * @pat: the parser pattern element that matches @instr
2391 * @instr: pointer to the instruction to check
2392 * @start_offset: this is an in/out parameter. If @instr has already been
2393 * split, then @start_offset is the offset from which to start
2394 * (either an address cycle or an offset in the data buffer).
2395 * Conversely, if the function returns true (ie. instr must be
2396 * split), this parameter is updated to point to the first
2397 * data/address cycle that has not been taken care of.
2399 * Some NAND controllers are limited and cannot send X address cycles with a
2400 * unique operation, or cannot read/write more than Y bytes at the same time.
2401 * In this case, split the instruction that does not fit in a single
2402 * controller-operation into two or more chunks.
2404 * Returns true if the instruction must be split, false otherwise.
2405 * The @start_offset parameter is also updated to the offset at which the next
2406 * bundle of instruction must start (if an address or a data instruction).
2409 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem
*pat
,
2410 const struct nand_op_instr
*instr
,
2411 unsigned int *start_offset
)
2413 switch (pat
->type
) {
2414 case NAND_OP_ADDR_INSTR
:
2415 if (!pat
->ctx
.addr
.maxcycles
)
2418 if (instr
->ctx
.addr
.naddrs
- *start_offset
>
2419 pat
->ctx
.addr
.maxcycles
) {
2420 *start_offset
+= pat
->ctx
.addr
.maxcycles
;
2425 case NAND_OP_DATA_IN_INSTR
:
2426 case NAND_OP_DATA_OUT_INSTR
:
2427 if (!pat
->ctx
.data
.maxlen
)
2430 if (instr
->ctx
.data
.len
- *start_offset
>
2431 pat
->ctx
.data
.maxlen
) {
2432 *start_offset
+= pat
->ctx
.data
.maxlen
;
/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in
 * @ctx. Returns true if this is the case, false otherwise. When true is
 * returned, @ctx->subop is updated with the set of instructions to be passed
 * to the controller driver.
 */
static bool
2456 nand_op_parser_match_pat(const struct nand_op_parser_pattern
*pat
,
2457 struct nand_op_parser_ctx
*ctx
)
2459 unsigned int instr_offset
= ctx
->subop
.first_instr_start_off
;
2460 const struct nand_op_instr
*end
= ctx
->instrs
+ ctx
->ninstrs
;
2461 const struct nand_op_instr
*instr
= ctx
->subop
.instrs
;
2462 unsigned int i
, ninstrs
;
2464 for (i
= 0, ninstrs
= 0; i
< pat
->nelems
&& instr
< end
; i
++) {
2466 * The pattern instruction does not match the operation
2467 * instruction. If the instruction is marked optional in the
2468 * pattern definition, we skip the pattern element and continue
2469 * to the next one. If the element is mandatory, there's no
2470 * match and we can return false directly.
2472 if (instr
->type
!= pat
->elems
[i
].type
) {
2473 if (!pat
->elems
[i
].optional
)
2480 * Now check the pattern element constraints. If the pattern is
2481 * not able to handle the whole instruction in a single step,
2482 * we have to split it.
2483 * The last_instr_end_off value comes back updated to point to
2484 * the position where we have to split the instruction (the
2485 * start of the next subop chunk).
2487 if (nand_op_parser_must_split_instr(&pat
->elems
[i
], instr
,
2500 * This can happen if all instructions of a pattern are optional.
2501 * Still, if there's not at least one instruction handled by this
2502 * pattern, this is not a match, and we should try the next one (if
2509 * We had a match on the pattern head, but the pattern may be longer
2510 * than the instructions we're asked to execute. We need to make sure
2511 * there's no mandatory elements in the pattern tail.
2513 for (; i
< pat
->nelems
; i
++) {
2514 if (!pat
->elems
[i
].optional
)
2519 * We have a match: update the subop structure accordingly and return
2522 ctx
->subop
.ninstrs
= ninstrs
;
2523 ctx
->subop
.last_instr_end_off
= instr_offset
;
2528 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
2529 static void nand_op_parser_trace(const struct nand_op_parser_ctx
*ctx
)
2531 const struct nand_op_instr
*instr
;
2535 pr_debug("executing subop:\n");
2537 for (i
= 0; i
< ctx
->ninstrs
; i
++) {
2538 instr
= &ctx
->instrs
[i
];
2540 if (instr
== &ctx
->subop
.instrs
[0])
2543 switch (instr
->type
) {
2544 case NAND_OP_CMD_INSTR
:
2545 pr_debug("%sCMD [0x%02x]\n", prefix
,
2546 instr
->ctx
.cmd
.opcode
);
2548 case NAND_OP_ADDR_INSTR
:
2549 pr_debug("%sADDR [%d cyc: %*ph]\n", prefix
,
2550 instr
->ctx
.addr
.naddrs
,
2551 instr
->ctx
.addr
.naddrs
< 64 ?
2552 instr
->ctx
.addr
.naddrs
: 64,
2553 instr
->ctx
.addr
.addrs
);
2555 case NAND_OP_DATA_IN_INSTR
:
2556 pr_debug("%sDATA_IN [%d B%s]\n", prefix
,
2557 instr
->ctx
.data
.len
,
2558 instr
->ctx
.data
.force_8bit
?
2559 ", force 8-bit" : "");
2561 case NAND_OP_DATA_OUT_INSTR
:
2562 pr_debug("%sDATA_OUT [%d B%s]\n", prefix
,
2563 instr
->ctx
.data
.len
,
2564 instr
->ctx
.data
.force_8bit
?
2565 ", force 8-bit" : "");
2567 case NAND_OP_WAITRDY_INSTR
:
2568 pr_debug("%sWAITRDY [max %d ms]\n", prefix
,
2569 instr
->ctx
.waitrdy
.timeout_ms
);
2573 if (instr
== &ctx
->subop
.instrs
[ctx
->subop
.ninstrs
- 1])
2578 static void nand_op_parser_trace(const struct nand_op_parser_ctx
*ctx
)
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and passes them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hooks.
 */
2606 int nand_op_parser_exec_op(struct nand_chip
*chip
,
2607 const struct nand_op_parser
*parser
,
2608 const struct nand_operation
*op
, bool check_only
)
2610 struct nand_op_parser_ctx ctx
= {
2611 .subop
.instrs
= op
->instrs
,
2612 .instrs
= op
->instrs
,
2613 .ninstrs
= op
->ninstrs
,
2617 while (ctx
.subop
.instrs
< op
->instrs
+ op
->ninstrs
) {
2620 for (i
= 0; i
< parser
->npatterns
; i
++) {
2621 const struct nand_op_parser_pattern
*pattern
;
2623 pattern
= &parser
->patterns
[i
];
2624 if (!nand_op_parser_match_pat(pattern
, &ctx
))
2627 nand_op_parser_trace(&ctx
);
2632 ret
= pattern
->exec(chip
, &ctx
.subop
);
2639 if (i
== parser
->npatterns
) {
2640 pr_debug("->exec_op() parser: pattern not found!\n");
2645 * Update the context structure by pointing to the start of the
2648 ctx
.subop
.instrs
= ctx
.subop
.instrs
+ ctx
.subop
.ninstrs
;
2649 if (ctx
.subop
.last_instr_end_off
)
2650 ctx
.subop
.instrs
-= 1;
2652 ctx
.subop
.first_instr_start_off
= ctx
.subop
.last_instr_end_off
;
2657 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op
);

static bool nand_instr_is_data(const struct nand_op_instr *instr)
{
	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
			 instr->type == NAND_OP_DATA_OUT_INSTR);
}

static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
				      unsigned int instr_idx)
{
	return subop && instr_idx < subop->ninstrs;
}

static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
					     unsigned int instr_idx)
{
	if (instr_idx)
		return 0;

	return subop->first_instr_start_off;
}

/**
 * nand_subop_get_addr_start_off - Get the start offset in an address array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr.addrs field of address instructions. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the offset of the first cycle to
 * issue.
 */
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);

/**
 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr.naddrs field of an address instruction. This is wrong as
 * instructions might be split.
 *
 * Given an address instruction, returns the number of address cycles to issue.
 */
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
					 unsigned int instr_idx)
{
	int start_off, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	start_off = nand_subop_get_addr_start_off(subop, instr_idx);

	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);

/**
 * nand_subop_get_data_start_off - Get the start offset in a data array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->data->buf.{in,out} field of data instructions. This is wrong as data
 * instructions might be split.
 *
 * Given a data instruction, returns the offset to start from.
 */
unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);

/**
 * nand_subop_get_data_len - Get the number of bytes to retrieve
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->data->len field of a data instruction. This is wrong as data instructions
 * might be split.
 *
 * Returns the length of the chunk of data to send/receive.
 */
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
				     unsigned int instr_idx)
{
	int start_off = 0, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	start_off = nand_subop_get_data_start_off(subop, instr_idx);

	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.data.len;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_len);

/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Save the timings data structure, then apply SDR timings mode 0 (see
 * nand_reset_data_interface for details), do the reset operation, and
 * apply back the previous timings.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_data_interface saved_data_intf = chip->data_interface;
	int ret;

	ret = nand_reset_data_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird ->select_chip() dance.
	 */
	chip->select_chip(mtd, chipnr);
	ret = nand_reset_op(chip);
	chip->select_chip(mtd, -1);
	if (ret)
		return ret;

	/*
	 * nand_reset_data_interface() puts both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_data_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->onfi_timing_mode_default)
		return 0;

	chip->data_interface = saved_data_intf;
	ret = nand_setup_data_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);

/**
 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
 * @buf: buffer to test
 * @len: buffer length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a buffer contains only 0xff, which means the underlying region
 * has been erased and is ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region as not erased.
 * Note: The logic of this function has been extracted from the memweight
 * implementation, except that nand_check_erased_buf exits before testing
 * the whole buffer if the number of bitflips exceeds the bitflips_threshold
 * value.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold.
 */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
	const unsigned char *bitmap = buf;
	int bitflips = 0;
	int weight;

	for (; len && ((uintptr_t)bitmap) % sizeof(long);
	     len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	for (; len >= sizeof(long);
	     len -= sizeof(long), bitmap += sizeof(long)) {
		unsigned long d = *((unsigned long *)bitmap);

		if (d == ~0UL)
			continue;

		weight = hweight_long(d);
		bitflips += BITS_PER_LONG - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	for (; len > 0; len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	return bitflips;
}

/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contains only
 * 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Note:
 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
 *    different from the NAND page size. When fixing bitflips, ECC engines will
 *    report the number of errors per chunk, and the NAND core infrastructure
 *    expects you to return the maximum number of bitflips for the whole page.
 *    This is why you should always use this function on a single chunk and
 *    not on the whole page. After checking each chunk you should update your
 *    max_bitflips value accordingly.
 * 2/ When checking for bitflips in erased pages you should not only check
 *    the payload data but also their associated ECC data, because a user might
 *    have programmed almost all bits to 1 but a few. In this case, we
 *    shouldn't consider the chunk as erased, and checking ECC bytes prevents
 *    this case.
 * 3/ The extraoob argument is optional, and should be used if some of your OOB
 *    data are protected by the ECC engine.
 *    It could also be used if you support subpages and want to attach some
 *    extra OOB data to an ECC chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;

	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	bitflips_threshold -= data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	bitflips_threshold -= ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);

/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
			       u8 *buf, int oob_required, int page)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL(nand_read_page_raw_notsupp);

/**
 * nand_read_page_raw - [INTERN] read raw page data without ecc
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       uint8_t *buf, int oob_required, int page)
{
	int ret;

	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
					false);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);
3017 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
3018 * @mtd: mtd info structure
3019 * @chip: nand chip info structure
3020 * @buf: buffer to store read data
3021 * @oob_required: caller requires OOB data read to chip->oob_poi
3022 * @page: page number to read
3024 * We need a special oob layout and handling even when OOB isn't used.
3026 static int nand_read_page_raw_syndrome(struct mtd_info
*mtd
,
3027 struct nand_chip
*chip
, uint8_t *buf
,
3028 int oob_required
, int page
)
3030 int eccsize
= chip
->ecc
.size
;
3031 int eccbytes
= chip
->ecc
.bytes
;
3032 uint8_t *oob
= chip
->oob_poi
;
3033 int steps
, size
, ret
;
3035 ret
= nand_read_page_op(chip
, page
, 0, NULL
, 0);
3039 for (steps
= chip
->ecc
.steps
; steps
> 0; steps
--) {
3040 ret
= nand_read_data_op(chip
, buf
, eccsize
, false);
3046 if (chip
->ecc
.prepad
) {
3047 ret
= nand_read_data_op(chip
, oob
, chip
->ecc
.prepad
,
3052 oob
+= chip
->ecc
.prepad
;
3055 ret
= nand_read_data_op(chip
, oob
, eccbytes
, false);
3061 if (chip
->ecc
.postpad
) {
3062 ret
= nand_read_data_op(chip
, oob
, chip
->ecc
.postpad
,
3067 oob
+= chip
->ecc
.postpad
;
3071 size
= mtd
->oobsize
- (oob
- chip
->oob_poi
);
3073 ret
= nand_read_data_op(chip
, oob
, size
, false);
3082 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
3083 * @mtd: mtd info structure
3084 * @chip: nand chip info structure
3085 * @buf: buffer to store read data
3086 * @oob_required: caller requires OOB data read to chip->oob_poi
3087 * @page: page number to read
3089 static int nand_read_page_swecc(struct mtd_info
*mtd
, struct nand_chip
*chip
,
3090 uint8_t *buf
, int oob_required
, int page
)
3092 int i
, eccsize
= chip
->ecc
.size
, ret
;
3093 int eccbytes
= chip
->ecc
.bytes
;
3094 int eccsteps
= chip
->ecc
.steps
;
3096 uint8_t *ecc_calc
= chip
->ecc
.calc_buf
;
3097 uint8_t *ecc_code
= chip
->ecc
.code_buf
;
3098 unsigned int max_bitflips
= 0;
3100 chip
->ecc
.read_page_raw(mtd
, chip
, buf
, 1, page
);
3102 for (i
= 0; eccsteps
; eccsteps
--, i
+= eccbytes
, p
+= eccsize
)
3103 chip
->ecc
.calculate(mtd
, p
, &ecc_calc
[i
]);
3105 ret
= mtd_ooblayout_get_eccbytes(mtd
, ecc_code
, chip
->oob_poi
, 0,
3110 eccsteps
= chip
->ecc
.steps
;
3113 for (i
= 0 ; eccsteps
; eccsteps
--, i
+= eccbytes
, p
+= eccsize
) {
3116 stat
= chip
->ecc
.correct(mtd
, p
, &ecc_code
[i
], &ecc_calc
[i
]);
3118 mtd
->ecc_stats
.failed
++;
3120 mtd
->ecc_stats
.corrected
+= stat
;
3121 max_bitflips
= max_t(unsigned int, max_bitflips
, stat
);
3124 return max_bitflips
;
3128 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
3129 * @mtd: mtd info structure
3130 * @chip: nand chip info structure
3131 * @data_offs: offset of requested data within the page
3132 * @readlen: data length
3133 * @bufpoi: buffer to store read data
3134 * @page: page number to read
3136 static int nand_read_subpage(struct mtd_info
*mtd
, struct nand_chip
*chip
,
3137 uint32_t data_offs
, uint32_t readlen
, uint8_t *bufpoi
,
3140 int start_step
, end_step
, num_steps
, ret
;
3142 int data_col_addr
, i
, gaps
= 0;
3143 int datafrag_len
, eccfrag_len
, aligned_len
, aligned_pos
;
3144 int busw
= (chip
->options
& NAND_BUSWIDTH_16
) ? 2 : 1;
3145 int index
, section
= 0;
3146 unsigned int max_bitflips
= 0;
3147 struct mtd_oob_region oobregion
= { };
3149 /* Column address within the page aligned to ECC size (256bytes) */
3150 start_step
= data_offs
/ chip
->ecc
.size
;
3151 end_step
= (data_offs
+ readlen
- 1) / chip
->ecc
.size
;
3152 num_steps
= end_step
- start_step
+ 1;
3153 index
= start_step
* chip
->ecc
.bytes
;
3155 /* Data size aligned to ECC ecc.size */
3156 datafrag_len
= num_steps
* chip
->ecc
.size
;
3157 eccfrag_len
= num_steps
* chip
->ecc
.bytes
;
3159 data_col_addr
= start_step
* chip
->ecc
.size
;
3160 /* If we read not a page aligned data */
3161 p
= bufpoi
+ data_col_addr
;
3162 ret
= nand_read_page_op(chip
, page
, data_col_addr
, p
, datafrag_len
);
3167 for (i
= 0; i
< eccfrag_len
; i
+= chip
->ecc
.bytes
, p
+= chip
->ecc
.size
)
3168 chip
->ecc
.calculate(mtd
, p
, &chip
->ecc
.calc_buf
[i
]);
3171 * The performance is faster if we position offsets according to
3172 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
3174 ret
= mtd_ooblayout_find_eccregion(mtd
, index
, §ion
, &oobregion
);
3178 if (oobregion
.length
< eccfrag_len
)
3182 ret
= nand_change_read_column_op(chip
, mtd
->writesize
,
3183 chip
->oob_poi
, mtd
->oobsize
,
3189 * Send the command to read the particular ECC bytes take care
3190 * about buswidth alignment in read_buf.
3192 aligned_pos
= oobregion
.offset
& ~(busw
- 1);
3193 aligned_len
= eccfrag_len
;
3194 if (oobregion
.offset
& (busw
- 1))
3196 if ((oobregion
.offset
+ (num_steps
* chip
->ecc
.bytes
)) &
3200 ret
= nand_change_read_column_op(chip
,
3201 mtd
->writesize
+ aligned_pos
,
3202 &chip
->oob_poi
[aligned_pos
],
3203 aligned_len
, false);
3208 ret
= mtd_ooblayout_get_eccbytes(mtd
, chip
->ecc
.code_buf
,
3209 chip
->oob_poi
, index
, eccfrag_len
);
3213 p
= bufpoi
+ data_col_addr
;
3214 for (i
= 0; i
< eccfrag_len
; i
+= chip
->ecc
.bytes
, p
+= chip
->ecc
.size
) {
3217 stat
= chip
->ecc
.correct(mtd
, p
, &chip
->ecc
.code_buf
[i
],
3218 &chip
->ecc
.calc_buf
[i
]);
3219 if (stat
== -EBADMSG
&&
3220 (chip
->ecc
.options
& NAND_ECC_GENERIC_ERASED_CHECK
)) {
3221 /* check for empty pages with bitflips */
3222 stat
= nand_check_erased_ecc_chunk(p
, chip
->ecc
.size
,
3223 &chip
->ecc
.code_buf
[i
],
3226 chip
->ecc
.strength
);
3230 mtd
->ecc_stats
.failed
++;
3232 mtd
->ecc_stats
.corrected
+= stat
;
3233 max_bitflips
= max_t(unsigned int, max_bitflips
, stat
);
3236 return max_bitflips
;
3240 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
3241 * @mtd: mtd info structure
3242 * @chip: nand chip info structure
3243 * @buf: buffer to store read data
3244 * @oob_required: caller requires OOB data read to chip->oob_poi
3245 * @page: page number to read
3247 * Not for syndrome calculating ECC controllers which need a special oob layout.
3249 static int nand_read_page_hwecc(struct mtd_info
*mtd
, struct nand_chip
*chip
,
3250 uint8_t *buf
, int oob_required
, int page
)
3252 int i
, eccsize
= chip
->ecc
.size
, ret
;
3253 int eccbytes
= chip
->ecc
.bytes
;
3254 int eccsteps
= chip
->ecc
.steps
;
3256 uint8_t *ecc_calc
= chip
->ecc
.calc_buf
;
3257 uint8_t *ecc_code
= chip
->ecc
.code_buf
;
3258 unsigned int max_bitflips
= 0;
3260 ret
= nand_read_page_op(chip
, page
, 0, NULL
, 0);
3264 for (i
= 0; eccsteps
; eccsteps
--, i
+= eccbytes
, p
+= eccsize
) {
3265 chip
->ecc
.hwctl(mtd
, NAND_ECC_READ
);
3267 ret
= nand_read_data_op(chip
, p
, eccsize
, false);
3271 chip
->ecc
.calculate(mtd
, p
, &ecc_calc
[i
]);
3274 ret
= nand_read_data_op(chip
, chip
->oob_poi
, mtd
->oobsize
, false);
3278 ret
= mtd_ooblayout_get_eccbytes(mtd
, ecc_code
, chip
->oob_poi
, 0,
3283 eccsteps
= chip
->ecc
.steps
;
3286 for (i
= 0 ; eccsteps
; eccsteps
--, i
+= eccbytes
, p
+= eccsize
) {
3289 stat
= chip
->ecc
.correct(mtd
, p
, &ecc_code
[i
], &ecc_calc
[i
]);
3290 if (stat
== -EBADMSG
&&
3291 (chip
->ecc
.options
& NAND_ECC_GENERIC_ERASED_CHECK
)) {
3292 /* check for empty pages with bitflips */
3293 stat
= nand_check_erased_ecc_chunk(p
, eccsize
,
3294 &ecc_code
[i
], eccbytes
,
3296 chip
->ecc
.strength
);
3300 mtd
->ecc_stats
.failed
++;
3302 mtd
->ecc_stats
.corrected
+= stat
;
3303 max_bitflips
= max_t(unsigned int, max_bitflips
, stat
);
3306 return max_bitflips
;
3310 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
3311 * @mtd: mtd info structure
3312 * @chip: nand chip info structure
3313 * @buf: buffer to store read data
3314 * @oob_required: caller requires OOB data read to chip->oob_poi
3315 * @page: page number to read
 * Hardware ECC for large page chips which require the OOB to be read first.
 * For this ECC mode, the write_page method is reused from ECC_HW. These
 * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
 * support with multiple ECC steps, which follows the "infix ECC" scheme and
 * reads/writes ECC from the data area, by overwriting the NAND manufacturer
 * bad block markings.
3323 static int nand_read_page_hwecc_oob_first(struct mtd_info
*mtd
,
3324 struct nand_chip
*chip
, uint8_t *buf
, int oob_required
, int page
)
3326 int i
, eccsize
= chip
->ecc
.size
, ret
;
3327 int eccbytes
= chip
->ecc
.bytes
;
3328 int eccsteps
= chip
->ecc
.steps
;
3330 uint8_t *ecc_code
= chip
->ecc
.code_buf
;
3331 uint8_t *ecc_calc
= chip
->ecc
.calc_buf
;
3332 unsigned int max_bitflips
= 0;
3334 /* Read the OOB area first */
3335 ret
= nand_read_oob_op(chip
, page
, 0, chip
->oob_poi
, mtd
->oobsize
);
3339 ret
= nand_read_page_op(chip
, page
, 0, NULL
, 0);
3343 ret
= mtd_ooblayout_get_eccbytes(mtd
, ecc_code
, chip
->oob_poi
, 0,
3348 for (i
= 0; eccsteps
; eccsteps
--, i
+= eccbytes
, p
+= eccsize
) {
3351 chip
->ecc
.hwctl(mtd
, NAND_ECC_READ
);
3353 ret
= nand_read_data_op(chip
, p
, eccsize
, false);
3357 chip
->ecc
.calculate(mtd
, p
, &ecc_calc
[i
]);
3359 stat
= chip
->ecc
.correct(mtd
, p
, &ecc_code
[i
], NULL
);
3360 if (stat
== -EBADMSG
&&
3361 (chip
->ecc
.options
& NAND_ECC_GENERIC_ERASED_CHECK
)) {
3362 /* check for empty pages with bitflips */
3363 stat
= nand_check_erased_ecc_chunk(p
, eccsize
,
3364 &ecc_code
[i
], eccbytes
,
3366 chip
->ecc
.strength
);
3370 mtd
->ecc_stats
.failed
++;
3372 mtd
->ecc_stats
.corrected
+= stat
;
3373 max_bitflips
= max_t(unsigned int, max_bitflips
, stat
);
3376 return max_bitflips
;
3380 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
3381 * @mtd: mtd info structure
3382 * @chip: nand chip info structure
3383 * @buf: buffer to store read data
3384 * @oob_required: caller requires OOB data read to chip->oob_poi
3385 * @page: page number to read
3387 * The hw generator calculates the error syndrome automatically. Therefore we
3388 * need a special oob layout and handling.
3390 static int nand_read_page_syndrome(struct mtd_info
*mtd
, struct nand_chip
*chip
,
3391 uint8_t *buf
, int oob_required
, int page
)
3393 int ret
, i
, eccsize
= chip
->ecc
.size
;
3394 int eccbytes
= chip
->ecc
.bytes
;
3395 int eccsteps
= chip
->ecc
.steps
;
3396 int eccpadbytes
= eccbytes
+ chip
->ecc
.prepad
+ chip
->ecc
.postpad
;
3398 uint8_t *oob
= chip
->oob_poi
;
3399 unsigned int max_bitflips
= 0;
3401 ret
= nand_read_page_op(chip
, page
, 0, NULL
, 0);
3405 for (i
= 0; eccsteps
; eccsteps
--, i
+= eccbytes
, p
+= eccsize
) {
3408 chip
->ecc
.hwctl(mtd
, NAND_ECC_READ
);
3410 ret
= nand_read_data_op(chip
, p
, eccsize
, false);
3414 if (chip
->ecc
.prepad
) {
3415 ret
= nand_read_data_op(chip
, oob
, chip
->ecc
.prepad
,
3420 oob
+= chip
->ecc
.prepad
;
3423 chip
->ecc
.hwctl(mtd
, NAND_ECC_READSYN
);
3425 ret
= nand_read_data_op(chip
, oob
, eccbytes
, false);
3429 stat
= chip
->ecc
.correct(mtd
, p
, oob
, NULL
);
3433 if (chip
->ecc
.postpad
) {
3434 ret
= nand_read_data_op(chip
, oob
, chip
->ecc
.postpad
,
3439 oob
+= chip
->ecc
.postpad
;
3442 if (stat
== -EBADMSG
&&
3443 (chip
->ecc
.options
& NAND_ECC_GENERIC_ERASED_CHECK
)) {
3444 /* check for empty pages with bitflips */
3445 stat
= nand_check_erased_ecc_chunk(p
, chip
->ecc
.size
,
3449 chip
->ecc
.strength
);
3453 mtd
->ecc_stats
.failed
++;
3455 mtd
->ecc_stats
.corrected
+= stat
;
3456 max_bitflips
= max_t(unsigned int, max_bitflips
, stat
);
3460 /* Calculate remaining oob bytes */
3461 i
= mtd
->oobsize
- (oob
- chip
->oob_poi
);
3463 ret
= nand_read_data_op(chip
, oob
, i
, false);
3468 return max_bitflips
;
3472 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3473 * @mtd: mtd info structure
3474 * @oob: oob destination address
3475 * @ops: oob ops structure
3476 * @len: size of oob to transfer
3478 static uint8_t *nand_transfer_oob(struct mtd_info
*mtd
, uint8_t *oob
,
3479 struct mtd_oob_ops
*ops
, size_t len
)
3481 struct nand_chip
*chip
= mtd_to_nand(mtd
);
3484 switch (ops
->mode
) {
3486 case MTD_OPS_PLACE_OOB
:
3488 memcpy(oob
, chip
->oob_poi
+ ops
->ooboffs
, len
);
3491 case MTD_OPS_AUTO_OOB
:
3492 ret
= mtd_ooblayout_get_databytes(mtd
, oob
, chip
->oob_poi
,
3504 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3505 * @mtd: MTD device structure
3506 * @retry_mode: the retry mode to use
3508 * Some vendors supply a special command to shift the Vt threshold, to be used
3509 * when there are too many bitflips in a page (i.e., ECC error). After setting
3510 * a new threshold, the host should retry reading the page.
3512 static int nand_setup_read_retry(struct mtd_info
*mtd
, int retry_mode
)
3514 struct nand_chip
*chip
= mtd_to_nand(mtd
);
3516 pr_debug("setting READ RETRY mode %d\n", retry_mode
);
3518 if (retry_mode
>= chip
->read_retries
)
3521 if (!chip
->setup_read_retry
)
3524 return chip
->setup_read_retry(mtd
, retry_mode
);
3528 * nand_do_read_ops - [INTERN] Read data with ECC
3529 * @mtd: MTD device structure
3530 * @from: offset to read from
3531 * @ops: oob ops structure
3533 * Internal function. Called with chip held.
3535 static int nand_do_read_ops(struct mtd_info
*mtd
, loff_t from
,
3536 struct mtd_oob_ops
*ops
)
3538 int chipnr
, page
, realpage
, col
, bytes
, aligned
, oob_required
;
3539 struct nand_chip
*chip
= mtd_to_nand(mtd
);
3541 uint32_t readlen
= ops
->len
;
3542 uint32_t oobreadlen
= ops
->ooblen
;
3543 uint32_t max_oobsize
= mtd_oobavail(mtd
, ops
);
3545 uint8_t *bufpoi
, *oob
, *buf
;
3547 unsigned int max_bitflips
= 0;
3549 bool ecc_fail
= false;
3551 chipnr
= (int)(from
>> chip
->chip_shift
);
3552 chip
->select_chip(mtd
, chipnr
);
3554 realpage
= (int)(from
>> chip
->page_shift
);
3555 page
= realpage
& chip
->pagemask
;
3557 col
= (int)(from
& (mtd
->writesize
- 1));
3561 oob_required
= oob
? 1 : 0;
3564 unsigned int ecc_failures
= mtd
->ecc_stats
.failed
;
3566 bytes
= min(mtd
->writesize
- col
, readlen
);
3567 aligned
= (bytes
== mtd
->writesize
);
3571 else if (chip
->options
& NAND_USE_BOUNCE_BUFFER
)
3572 use_bufpoi
= !virt_addr_valid(buf
) ||
3573 !IS_ALIGNED((unsigned long)buf
,
3578 /* Is the current page in the buffer? */
3579 if (realpage
!= chip
->pagebuf
|| oob
) {
3580 bufpoi
= use_bufpoi
? chip
->data_buf
: buf
;
3582 if (use_bufpoi
&& aligned
)
3583 pr_debug("%s: using read bounce buffer for buf@%p\n",
3588 * Now read the page into the buffer. Absent an error,
3589 * the read methods return max bitflips per ecc step.
3591 if (unlikely(ops
->mode
== MTD_OPS_RAW
))
3592 ret
= chip
->ecc
.read_page_raw(mtd
, chip
, bufpoi
,
3595 else if (!aligned
&& NAND_HAS_SUBPAGE_READ(chip
) &&
3597 ret
= chip
->ecc
.read_subpage(mtd
, chip
,
3601 ret
= chip
->ecc
.read_page(mtd
, chip
, bufpoi
,
3602 oob_required
, page
);
3605 /* Invalidate page cache */
3610 /* Transfer not aligned data */
3612 if (!NAND_HAS_SUBPAGE_READ(chip
) && !oob
&&
3613 !(mtd
->ecc_stats
.failed
- ecc_failures
) &&
3614 (ops
->mode
!= MTD_OPS_RAW
)) {
3615 chip
->pagebuf
= realpage
;
3616 chip
->pagebuf_bitflips
= ret
;
3618 /* Invalidate page cache */
3621 memcpy(buf
, chip
->data_buf
+ col
, bytes
);
3624 if (unlikely(oob
)) {
3625 int toread
= min(oobreadlen
, max_oobsize
);
3628 oob
= nand_transfer_oob(mtd
,
3630 oobreadlen
-= toread
;
3634 if (chip
->options
& NAND_NEED_READRDY
) {
3635 /* Apply delay or wait for ready/busy pin */
3636 if (!chip
->dev_ready
)
3637 udelay(chip
->chip_delay
);
3639 nand_wait_ready(mtd
);
3642 if (mtd
->ecc_stats
.failed
- ecc_failures
) {
3643 if (retry_mode
+ 1 < chip
->read_retries
) {
3645 ret
= nand_setup_read_retry(mtd
,
3650 /* Reset failures; retry */
3651 mtd
->ecc_stats
.failed
= ecc_failures
;
3654 /* No more retry modes; real failure */
3660 max_bitflips
= max_t(unsigned int, max_bitflips
, ret
);
3662 memcpy(buf
, chip
->data_buf
+ col
, bytes
);
3664 max_bitflips
= max_t(unsigned int, max_bitflips
,
3665 chip
->pagebuf_bitflips
);
3670 /* Reset to retry mode 0 */
3672 ret
= nand_setup_read_retry(mtd
, 0);
3681 /* For subsequent reads align to page boundary */
3683 /* Increment page address */
3686 page
= realpage
& chip
->pagemask
;
3687 /* Check, if we cross a chip boundary */
3690 chip
->select_chip(mtd
, -1);
3691 chip
->select_chip(mtd
, chipnr
);
3694 chip
->select_chip(mtd
, -1);
3696 ops
->retlen
= ops
->len
- (size_t) readlen
;
3698 ops
->oobretlen
= ops
->ooblen
- oobreadlen
;
3706 return max_bitflips
;
/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to read
 */
int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
3722 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3724 * @mtd: mtd info structure
3725 * @chip: nand chip info structure
3726 * @page: page number to read
3728 int nand_read_oob_syndrome(struct mtd_info
*mtd
, struct nand_chip
*chip
,
3731 int length
= mtd
->oobsize
;
3732 int chunk
= chip
->ecc
.bytes
+ chip
->ecc
.prepad
+ chip
->ecc
.postpad
;
3733 int eccsize
= chip
->ecc
.size
;
3734 uint8_t *bufpoi
= chip
->oob_poi
;
3735 int i
, toread
, sndrnd
= 0, pos
, ret
;
3737 ret
= nand_read_page_op(chip
, page
, chip
->ecc
.size
, NULL
, 0);
3741 for (i
= 0; i
< chip
->ecc
.steps
; i
++) {
3745 pos
= eccsize
+ i
* (eccsize
+ chunk
);
3746 if (mtd
->writesize
> 512)
3747 ret
= nand_change_read_column_op(chip
, pos
,
3751 ret
= nand_read_page_op(chip
, page
, pos
, NULL
,
3758 toread
= min_t(int, length
, chunk
);
3760 ret
= nand_read_data_op(chip
, bufpoi
, toread
, false);
3768 ret
= nand_read_data_op(chip
, bufpoi
, length
, false);
3775 EXPORT_SYMBOL(nand_read_oob_syndrome
);
/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 */
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
3791 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3792 * with syndrome - only for large page flash
3793 * @mtd: mtd info structure
3794 * @chip: nand chip info structure
3795 * @page: page number to write
3797 int nand_write_oob_syndrome(struct mtd_info
*mtd
, struct nand_chip
*chip
,
3800 int chunk
= chip
->ecc
.bytes
+ chip
->ecc
.prepad
+ chip
->ecc
.postpad
;
3801 int eccsize
= chip
->ecc
.size
, length
= mtd
->oobsize
;
3802 int ret
, i
, len
, pos
, sndcmd
= 0, steps
= chip
->ecc
.steps
;
3803 const uint8_t *bufpoi
= chip
->oob_poi
;
3806 * data-ecc-data-ecc ... ecc-oob
3808 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3810 if (!chip
->ecc
.prepad
&& !chip
->ecc
.postpad
) {
3811 pos
= steps
* (eccsize
+ chunk
);
3816 ret
= nand_prog_page_begin_op(chip
, page
, pos
, NULL
, 0);
3820 for (i
= 0; i
< steps
; i
++) {
3822 if (mtd
->writesize
<= 512) {
3823 uint32_t fill
= 0xFFFFFFFF;
3827 int num
= min_t(int, len
, 4);
3829 ret
= nand_write_data_op(chip
, &fill
,
3837 pos
= eccsize
+ i
* (eccsize
+ chunk
);
3838 ret
= nand_change_write_column_op(chip
, pos
,
3846 len
= min_t(int, length
, chunk
);
3848 ret
= nand_write_data_op(chip
, bufpoi
, len
, false);
3856 ret
= nand_write_data_op(chip
, bufpoi
, length
, false);
3861 return nand_prog_page_end_op(chip
);
3863 EXPORT_SYMBOL(nand_write_oob_syndrome
);
3866 * nand_do_read_oob - [INTERN] NAND read out-of-band
3867 * @mtd: MTD device structure
3868 * @from: offset to read from
3869 * @ops: oob operations description structure
3871 * NAND read out-of-band data from the spare area.
3873 static int nand_do_read_oob(struct mtd_info
*mtd
, loff_t from
,
3874 struct mtd_oob_ops
*ops
)
3876 unsigned int max_bitflips
= 0;
3877 int page
, realpage
, chipnr
;
3878 struct nand_chip
*chip
= mtd_to_nand(mtd
);
3879 struct mtd_ecc_stats stats
;
3880 int readlen
= ops
->ooblen
;
3882 uint8_t *buf
= ops
->oobbuf
;
3885 pr_debug("%s: from = 0x%08Lx, len = %i\n",
3886 __func__
, (unsigned long long)from
, readlen
);
3888 stats
= mtd
->ecc_stats
;
3890 len
= mtd_oobavail(mtd
, ops
);
3892 chipnr
= (int)(from
>> chip
->chip_shift
);
3893 chip
->select_chip(mtd
, chipnr
);
3895 /* Shift to get page */
3896 realpage
= (int)(from
>> chip
->page_shift
);
3897 page
= realpage
& chip
->pagemask
;
3900 if (ops
->mode
== MTD_OPS_RAW
)
3901 ret
= chip
->ecc
.read_oob_raw(mtd
, chip
, page
);
3903 ret
= chip
->ecc
.read_oob(mtd
, chip
, page
);
3908 len
= min(len
, readlen
);
3909 buf
= nand_transfer_oob(mtd
, buf
, ops
, len
);
3911 if (chip
->options
& NAND_NEED_READRDY
) {
3912 /* Apply delay or wait for ready/busy pin */
3913 if (!chip
->dev_ready
)
3914 udelay(chip
->chip_delay
);
3916 nand_wait_ready(mtd
);
3919 max_bitflips
= max_t(unsigned int, max_bitflips
, ret
);
3925 /* Increment page address */
3928 page
= realpage
& chip
->pagemask
;
3929 /* Check, if we cross a chip boundary */
3932 chip
->select_chip(mtd
, -1);
3933 chip
->select_chip(mtd
, chipnr
);
3936 chip
->select_chip(mtd
, -1);
3938 ops
->oobretlen
= ops
->ooblen
- readlen
;
3943 if (mtd
->ecc_stats
.failed
- stats
.failed
)
3946 return max_bitflips
;
3950 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3951 * @mtd: MTD device structure
3952 * @from: offset to read from
3953 * @ops: oob operation description structure
3955 * NAND read data and/or out-of-band data.
3957 static int nand_read_oob(struct mtd_info
*mtd
, loff_t from
,
3958 struct mtd_oob_ops
*ops
)
3964 if (ops
->mode
!= MTD_OPS_PLACE_OOB
&&
3965 ops
->mode
!= MTD_OPS_AUTO_OOB
&&
3966 ops
->mode
!= MTD_OPS_RAW
)
3969 nand_get_device(mtd
, FL_READING
);
3972 ret
= nand_do_read_oob(mtd
, from
, ops
);
3974 ret
= nand_do_read_ops(mtd
, from
, ops
);
3976 nand_release_device(mtd
);
3981 * nand_write_page_raw_notsupp - dummy raw page write function
3982 * @mtd: mtd info structure
3983 * @chip: nand chip info structure
3985 * @oob_required: must write chip->oob_poi to OOB
3986 * @page: page number to write
3988 * Returns -ENOTSUPP unconditionally.
3990 int nand_write_page_raw_notsupp(struct mtd_info
*mtd
, struct nand_chip
*chip
,
3991 const u8
*buf
, int oob_required
, int page
)
3995 EXPORT_SYMBOL(nand_write_page_raw_notsupp
);
3998 * nand_write_page_raw - [INTERN] raw page write function
3999 * @mtd: mtd info structure
4000 * @chip: nand chip info structure
4002 * @oob_required: must write chip->oob_poi to OOB
4003 * @page: page number to write
4005 * Not for syndrome calculating ECC controllers, which use a special oob layout.
4007 int nand_write_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
4008 const uint8_t *buf
, int oob_required
, int page
)
4012 ret
= nand_prog_page_begin_op(chip
, page
, 0, buf
, mtd
->writesize
);
4017 ret
= nand_write_data_op(chip
, chip
->oob_poi
, mtd
->oobsize
,
4023 return nand_prog_page_end_op(chip
);
4025 EXPORT_SYMBOL(nand_write_page_raw
);
4028 * nand_write_page_raw_syndrome - [INTERN] raw page write function
4029 * @mtd: mtd info structure
4030 * @chip: nand chip info structure
4032 * @oob_required: must write chip->oob_poi to OOB
4033 * @page: page number to write
4035 * We need a special oob layout and handling even when ECC isn't checked.
4037 static int nand_write_page_raw_syndrome(struct mtd_info
*mtd
,
4038 struct nand_chip
*chip
,
4039 const uint8_t *buf
, int oob_required
,
4042 int eccsize
= chip
->ecc
.size
;
4043 int eccbytes
= chip
->ecc
.bytes
;
4044 uint8_t *oob
= chip
->oob_poi
;
4045 int steps
, size
, ret
;
4047 ret
= nand_prog_page_begin_op(chip
, page
, 0, NULL
, 0);
4051 for (steps
= chip
->ecc
.steps
; steps
> 0; steps
--) {
4052 ret
= nand_write_data_op(chip
, buf
, eccsize
, false);
4058 if (chip
->ecc
.prepad
) {
4059 ret
= nand_write_data_op(chip
, oob
, chip
->ecc
.prepad
,
4064 oob
+= chip
->ecc
.prepad
;
4067 ret
= nand_write_data_op(chip
, oob
, eccbytes
, false);
4073 if (chip
->ecc
.postpad
) {
4074 ret
= nand_write_data_op(chip
, oob
, chip
->ecc
.postpad
,
4079 oob
+= chip
->ecc
.postpad
;
4083 size
= mtd
->oobsize
- (oob
- chip
->oob_poi
);
4085 ret
= nand_write_data_op(chip
, oob
, size
, false);
4090 return nand_prog_page_end_op(chip
);
4093 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
4094 * @mtd: mtd info structure
4095 * @chip: nand chip info structure
4097 * @oob_required: must write chip->oob_poi to OOB
4098 * @page: page number to write
4100 static int nand_write_page_swecc(struct mtd_info
*mtd
, struct nand_chip
*chip
,
4101 const uint8_t *buf
, int oob_required
,
4104 int i
, eccsize
= chip
->ecc
.size
, ret
;
4105 int eccbytes
= chip
->ecc
.bytes
;
4106 int eccsteps
= chip
->ecc
.steps
;
4107 uint8_t *ecc_calc
= chip
->ecc
.calc_buf
;
4108 const uint8_t *p
= buf
;
4110 /* Software ECC calculation */
4111 for (i
= 0; eccsteps
; eccsteps
--, i
+= eccbytes
, p
+= eccsize
)
4112 chip
->ecc
.calculate(mtd
, p
, &ecc_calc
[i
]);
4114 ret
= mtd_ooblayout_set_eccbytes(mtd
, ecc_calc
, chip
->oob_poi
, 0,
4119 return chip
->ecc
.write_page_raw(mtd
, chip
, buf
, 1, page
);
4123 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
4124 * @mtd: mtd info structure
4125 * @chip: nand chip info structure
4127 * @oob_required: must write chip->oob_poi to OOB
4128 * @page: page number to write
4130 static int nand_write_page_hwecc(struct mtd_info
*mtd
, struct nand_chip
*chip
,
4131 const uint8_t *buf
, int oob_required
,
4134 int i
, eccsize
= chip
->ecc
.size
, ret
;
4135 int eccbytes
= chip
->ecc
.bytes
;
4136 int eccsteps
= chip
->ecc
.steps
;
4137 uint8_t *ecc_calc
= chip
->ecc
.calc_buf
;
4138 const uint8_t *p
= buf
;
4140 ret
= nand_prog_page_begin_op(chip
, page
, 0, NULL
, 0);
4144 for (i
= 0; eccsteps
; eccsteps
--, i
+= eccbytes
, p
+= eccsize
) {
4145 chip
->ecc
.hwctl(mtd
, NAND_ECC_WRITE
);
4147 ret
= nand_write_data_op(chip
, p
, eccsize
, false);
4151 chip
->ecc
.calculate(mtd
, p
, &ecc_calc
[i
]);
4154 ret
= mtd_ooblayout_set_eccbytes(mtd
, ecc_calc
, chip
->oob_poi
, 0,
4159 ret
= nand_write_data_op(chip
, chip
->oob_poi
, mtd
->oobsize
, false);
4163 return nand_prog_page_end_op(chip
);
4168 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
4169 * @mtd: mtd info structure
4170 * @chip: nand chip info structure
4171 * @offset: column address of subpage within the page
4172 * @data_len: data length
4174 * @oob_required: must write chip->oob_poi to OOB
4175 * @page: page number to write
4177 static int nand_write_subpage_hwecc(struct mtd_info
*mtd
,
4178 struct nand_chip
*chip
, uint32_t offset
,
4179 uint32_t data_len
, const uint8_t *buf
,
4180 int oob_required
, int page
)
4182 uint8_t *oob_buf
= chip
->oob_poi
;
4183 uint8_t *ecc_calc
= chip
->ecc
.calc_buf
;
4184 int ecc_size
= chip
->ecc
.size
;
4185 int ecc_bytes
= chip
->ecc
.bytes
;
4186 int ecc_steps
= chip
->ecc
.steps
;
4187 uint32_t start_step
= offset
/ ecc_size
;
4188 uint32_t end_step
= (offset
+ data_len
- 1) / ecc_size
;
4189 int oob_bytes
= mtd
->oobsize
/ ecc_steps
;
4192 ret
= nand_prog_page_begin_op(chip
, page
, 0, NULL
, 0);
4196 for (step
= 0; step
< ecc_steps
; step
++) {
4197 /* configure controller for WRITE access */
4198 chip
->ecc
.hwctl(mtd
, NAND_ECC_WRITE
);
4200 /* write data (untouched subpages already masked by 0xFF) */
4201 ret
= nand_write_data_op(chip
, buf
, ecc_size
, false);
4205 /* mask ECC of un-touched subpages by padding 0xFF */
4206 if ((step
< start_step
) || (step
> end_step
))
4207 memset(ecc_calc
, 0xff, ecc_bytes
);
4209 chip
->ecc
.calculate(mtd
, buf
, ecc_calc
);
4211 /* mask OOB of un-touched subpages by padding 0xFF */
4212 /* if oob_required, preserve OOB metadata of written subpage */
4213 if (!oob_required
|| (step
< start_step
) || (step
> end_step
))
4214 memset(oob_buf
, 0xff, oob_bytes
);
4217 ecc_calc
+= ecc_bytes
;
4218 oob_buf
+= oob_bytes
;
4221 /* copy calculated ECC for whole page to chip->buffer->oob */
4222 /* this include masked-value(0xFF) for unwritten subpages */
4223 ecc_calc
= chip
->ecc
.calc_buf
;
4224 ret
= mtd_ooblayout_set_eccbytes(mtd
, ecc_calc
, chip
->oob_poi
, 0,
4229 /* write OOB buffer to NAND device */
4230 ret
= nand_write_data_op(chip
, chip
->oob_poi
, mtd
->oobsize
, false);
4234 return nand_prog_page_end_op(chip
);
4239 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
4240 * @mtd: mtd info structure
4241 * @chip: nand chip info structure
4243 * @oob_required: must write chip->oob_poi to OOB
4244 * @page: page number to write
4246 * The hw generator calculates the error syndrome automatically. Therefore we
4247 * need a special oob layout and handling.
4249 static int nand_write_page_syndrome(struct mtd_info
*mtd
,
4250 struct nand_chip
*chip
,
4251 const uint8_t *buf
, int oob_required
,
4254 int i
, eccsize
= chip
->ecc
.size
;
4255 int eccbytes
= chip
->ecc
.bytes
;
4256 int eccsteps
= chip
->ecc
.steps
;
4257 const uint8_t *p
= buf
;
4258 uint8_t *oob
= chip
->oob_poi
;
4261 ret
= nand_prog_page_begin_op(chip
, page
, 0, NULL
, 0);
4265 for (i
= 0; eccsteps
; eccsteps
--, i
+= eccbytes
, p
+= eccsize
) {
4266 chip
->ecc
.hwctl(mtd
, NAND_ECC_WRITE
);
4268 ret
= nand_write_data_op(chip
, p
, eccsize
, false);
4272 if (chip
->ecc
.prepad
) {
4273 ret
= nand_write_data_op(chip
, oob
, chip
->ecc
.prepad
,
4278 oob
+= chip
->ecc
.prepad
;
4281 chip
->ecc
.calculate(mtd
, p
, oob
);
4283 ret
= nand_write_data_op(chip
, oob
, eccbytes
, false);
4289 if (chip
->ecc
.postpad
) {
4290 ret
= nand_write_data_op(chip
, oob
, chip
->ecc
.postpad
,
4295 oob
+= chip
->ecc
.postpad
;
4299 /* Calculate remaining oob bytes */
4300 i
= mtd
->oobsize
- (oob
- chip
->oob_poi
);
4302 ret
= nand_write_data_op(chip
, oob
, i
, false);
4307 return nand_prog_page_end_op(chip
);
4311 * nand_write_page - write one page
4312 * @mtd: MTD device structure
4313 * @chip: NAND chip descriptor
4314 * @offset: address offset within the page
4315 * @data_len: length of actual data to be written
4316 * @buf: the data to write
4317 * @oob_required: must write chip->oob_poi to OOB
4318 * @page: page number to write
4319 * @raw: use _raw version of write_page
4321 static int nand_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
4322 uint32_t offset
, int data_len
, const uint8_t *buf
,
4323 int oob_required
, int page
, int raw
)
4325 int status
, subpage
;
4327 if (!(chip
->options
& NAND_NO_SUBPAGE_WRITE
) &&
4328 chip
->ecc
.write_subpage
)
4329 subpage
= offset
|| (data_len
< mtd
->writesize
);
4334 status
= chip
->ecc
.write_page_raw(mtd
, chip
, buf
,
4335 oob_required
, page
);
4337 status
= chip
->ecc
.write_subpage(mtd
, chip
, offset
, data_len
,
4338 buf
, oob_required
, page
);
4340 status
= chip
->ecc
.write_page(mtd
, chip
, buf
, oob_required
,
4350 * nand_fill_oob - [INTERN] Transfer client buffer to oob
4351 * @mtd: MTD device structure
4352 * @oob: oob data buffer
4353 * @len: oob data write length
4354 * @ops: oob ops structure
4356 static uint8_t *nand_fill_oob(struct mtd_info
*mtd
, uint8_t *oob
, size_t len
,
4357 struct mtd_oob_ops
*ops
)
4359 struct nand_chip
*chip
= mtd_to_nand(mtd
);
4363 * Initialise to all 0xFF, to avoid the possibility of left over OOB
4364 * data from a previous OOB read.
4366 memset(chip
->oob_poi
, 0xff, mtd
->oobsize
);
4368 switch (ops
->mode
) {
4370 case MTD_OPS_PLACE_OOB
:
4372 memcpy(chip
->oob_poi
+ ops
->ooboffs
, oob
, len
);
4375 case MTD_OPS_AUTO_OOB
:
4376 ret
= mtd_ooblayout_set_databytes(mtd
, oob
, chip
->oob_poi
,
4387 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
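
/*
 * Example (illustrative sketch only, not part of this driver): NOTALIGNED()
 * flags any offset or length that is not a multiple of chip->subpagesize.
 * With a hypothetical 2048-byte page split into four 512-byte subpages, a
 * 100-byte write at offset 512 would therefore be rejected by
 * nand_do_write_ops() below. The helper name "example_write_is_aligned" is
 * hypothetical.
 */
static bool __maybe_unused example_write_is_aligned(struct nand_chip *chip,
						    loff_t to, size_t len)
{
	/* Mirrors the NOTALIGNED() check used by nand_do_write_ops(). */
	return !((to & (chip->subpagesize - 1)) ||
		 (len & (chip->subpagesize - 1)));
}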
4390 * nand_do_write_ops - [INTERN] NAND write with ECC
4391 * @mtd: MTD device structure
4392 * @to: offset to write to
4393 * @ops: oob operations description structure
4395 * NAND write with ECC.
4397 static int nand_do_write_ops(struct mtd_info
*mtd
, loff_t to
,
4398 struct mtd_oob_ops
*ops
)
4400 int chipnr
, realpage
, page
, column
;
4401 struct nand_chip
*chip
= mtd_to_nand(mtd
);
4402 uint32_t writelen
= ops
->len
;
4404 uint32_t oobwritelen
= ops
->ooblen
;
4405 uint32_t oobmaxlen
= mtd_oobavail(mtd
, ops
);
4407 uint8_t *oob
= ops
->oobbuf
;
4408 uint8_t *buf
= ops
->datbuf
;
4410 int oob_required
= oob
? 1 : 0;
4416 /* Reject writes, which are not page aligned */
4417 if (NOTALIGNED(to
) || NOTALIGNED(ops
->len
)) {
4418 pr_notice("%s: attempt to write non page aligned data\n",
4423 column
= to
& (mtd
->writesize
- 1);
4425 chipnr
= (int)(to
>> chip
->chip_shift
);
4426 chip
->select_chip(mtd
, chipnr
);
4428 /* Check, if it is write protected */
4429 if (nand_check_wp(mtd
)) {
4434 realpage
= (int)(to
>> chip
->page_shift
);
4435 page
= realpage
& chip
->pagemask
;
4437 /* Invalidate the page cache, when we write to the cached page */
4438 if (to
<= ((loff_t
)chip
->pagebuf
<< chip
->page_shift
) &&
4439 ((loff_t
)chip
->pagebuf
<< chip
->page_shift
) < (to
+ ops
->len
))
4442 /* Don't allow multipage oob writes with offset */
4443 if (oob
&& ops
->ooboffs
&& (ops
->ooboffs
+ ops
->ooblen
> oobmaxlen
)) {
4449 int bytes
= mtd
->writesize
;
4450 uint8_t *wbuf
= buf
;
4452 int part_pagewr
= (column
|| writelen
< mtd
->writesize
);
4456 else if (chip
->options
& NAND_USE_BOUNCE_BUFFER
)
4457 use_bufpoi
= !virt_addr_valid(buf
) ||
4458 !IS_ALIGNED((unsigned long)buf
,
4463 /* Partial page write?, or need to use bounce buffer */
4465 pr_debug("%s: using write bounce buffer for buf@%p\n",
4468 bytes
= min_t(int, bytes
- column
, writelen
);
4470 memset(chip
->data_buf
, 0xff, mtd
->writesize
);
4471 memcpy(&chip
->data_buf
[column
], buf
, bytes
);
4472 wbuf
= chip
->data_buf
;
4475 if (unlikely(oob
)) {
4476 size_t len
= min(oobwritelen
, oobmaxlen
);
4477 oob
= nand_fill_oob(mtd
, oob
, len
, ops
);
4480 /* We still need to erase leftover OOB data */
4481 memset(chip
->oob_poi
, 0xff, mtd
->oobsize
);
4484 ret
= nand_write_page(mtd
, chip
, column
, bytes
, wbuf
,
4486 (ops
->mode
== MTD_OPS_RAW
));
4498 page
= realpage
& chip
->pagemask
;
4499 /* Check, if we cross a chip boundary */
4502 chip
->select_chip(mtd
, -1);
4503 chip
->select_chip(mtd
, chipnr
);
4507 ops
->retlen
= ops
->len
- writelen
;
4509 ops
->oobretlen
= ops
->ooblen
;
4512 chip
->select_chip(mtd
, -1);
4517 * panic_nand_write - [MTD Interface] NAND write with ECC
4518 * @mtd: MTD device structure
4519 * @to: offset to write to
4520 * @len: number of bytes to write
4521 * @retlen: pointer to variable to store the number of written bytes
4522 * @buf: the data to write
4524 * NAND write with ECC. Used when performing writes in interrupt context, this
4525 * may for example be called by mtdoops when writing an oops while in panic.
4527 static int panic_nand_write(struct mtd_info
*mtd
, loff_t to
, size_t len
,
4528 size_t *retlen
, const uint8_t *buf
)
4530 struct nand_chip
*chip
= mtd_to_nand(mtd
);
4531 int chipnr
= (int)(to
>> chip
->chip_shift
);
4532 struct mtd_oob_ops ops
;
4535 /* Grab the device */
4536 panic_nand_get_device(chip
, mtd
, FL_WRITING
);
4538 chip
->select_chip(mtd
, chipnr
);
4540 /* Wait for the device to get ready */
4541 panic_nand_wait(mtd
, chip
, 400);
4543 memset(&ops
, 0, sizeof(ops
));
4545 ops
.datbuf
= (uint8_t *)buf
;
4546 ops
.mode
= MTD_OPS_PLACE_OOB
;
4548 ret
= nand_do_write_ops(mtd
, to
, &ops
);
4550 *retlen
= ops
.retlen
;
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 */
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	int chipnr, page, status, len;
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: to = 0x%08x, len = %i\n",
		 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
			 __func__);
	}

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	nand_reset(chip, chipnr);

	chip->select_chip(mtd, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		chip->select_chip(mtd, -1);
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagebuf)
		chip->pagebuf = -1;

	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);

	chip->select_chip(mtd, -1);

	ops->oobretlen = ops->ooblen;
}
/**
 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 */
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	int ret = -ENOTSUPP;

	nand_get_device(mtd, FL_WRITING);

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	}

	if (!ops->datbuf)
		ret = nand_do_write_oob(mtd, to, ops);
	else
		ret = nand_do_write_ops(mtd, to, ops);

	nand_release_device(mtd);
}
/**
 * single_erase - [GENERIC] NAND standard block erase command function
 * @mtd: MTD device structure
 * @page: the page address of the block which will be erased
 *
 * Standard erase command for NAND chips. Returns NAND status.
 */
static int single_erase(struct mtd_info *mtd, int page)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	unsigned int eraseblock;

	/* Send commands to erase a block */
	eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);

	return nand_erase_op(chip, eraseblock);
}
/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks.
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return nand_erase_nand(mtd, instr, 0);
}
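
/*
 * Example (illustrative only, not part of the driver): an MTD user erasing
 * the first eraseblock reaches nand_erase() above through the MTD core.
 * The erase_info fields follow the instr->addr/instr->len uses visible in
 * nand_erase_nand() below.
 *
 *	struct erase_info instr = {
 *		.addr = 0,
 *		.len  = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &instr);
 */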
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks.
 */
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
		    int allowbbt)
{
	int page, status, pages_per_block, ret, chipnr;
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
		 __func__, (unsigned long long)instr->addr,
		 (unsigned long long)instr->len);

	if (check_offs_len(mtd, instr->addr, instr->len))

	/* Grab the lock and see if the device is available */
	nand_get_device(mtd, FL_ERASING);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		pr_debug("%s: device is write protected!\n",
			 __func__);
	}

	/* Loop through the pages */

		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(mtd, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				__func__, page);
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagebuf && chip->pagebuf <
		    (page + pages_per_block))
			chip->pagebuf = -1;

		status = chip->erase(mtd, page & chip->pagemask);

		/* See if block erase succeeded */
			pr_debug("%s: failed erase, page 0x%08x\n",
				 __func__, page);
			((loff_t)page << chip->page_shift);

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}

	/* Deselect and wake up anyone waiting on the device */
	chip->select_chip(mtd, -1);
	nand_release_device(mtd);

	/* Return more or less happy */
}
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
 */
static void nand_sync(struct mtd_info *mtd)
{
	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	nand_get_device(mtd, FL_SYNCING);
	/* Release it and go back */
	nand_release_device(mtd);
}
/**
 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
 * @mtd: MTD device structure
 * @offs: offset relative to mtd start
 */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(offs >> chip->chip_shift);

	/* Select the NAND device */
	nand_get_device(mtd, FL_READING);
	chip->select_chip(mtd, chipnr);

	ret = nand_block_checkbad(mtd, offs, 0);

	chip->select_chip(mtd, -1);
	nand_release_device(mtd);
}
/**
 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
 * @mtd: MTD device structure
 * @ofs: offset relative to mtd start
 */
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	ret = nand_block_isbad(mtd, ofs);

	/* If it was bad already, return success and do nothing */

	return nand_block_markbad_lowlevel(mtd, ofs);
}
/**
 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
 * @mtd: MTD device structure
 * @ofs: offset relative to mtd start
 * @len: length of mtd
 */
static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u32 part_start_block;

	/*
	 * max_bb_per_die and blocks_per_die are used to determine
	 * the maximum bad block count.
	 */
	if (!chip->max_bb_per_die || !chip->blocks_per_die)

	/* Get the start and end of the partition in erase blocks. */
	part_start_block = mtd_div_by_eb(ofs, mtd);
	part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;

	/* Get the start and end LUNs of the partition. */
	part_start_die = part_start_block / chip->blocks_per_die;
	part_end_die = part_end_block / chip->blocks_per_die;

	/*
	 * Look up the bad blocks per unit and multiply by the number of units
	 * that the partition spans.
	 */
	return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
}
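
/*
 * Worked example for the computation above (illustrative numbers): with
 * blocks_per_die = 1024 and max_bb_per_die = 40, a partition covering erase
 * blocks 1000..1100 spans dies 0 and 1, so the reported worst case is
 * 40 * (1 - 0 + 1) = 80 bad blocks.
 */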
/**
 * nand_default_set_features - [REPLACEABLE] set NAND chip features
 * @mtd: MTD device structure
 * @chip: nand chip info structure
 * @addr: feature address.
 * @subfeature_param: the subfeature parameters, a four-byte array.
 */
static int nand_default_set_features(struct mtd_info *mtd,
				     struct nand_chip *chip, int addr,
				     uint8_t *subfeature_param)
{
	return nand_set_features_op(chip, addr, subfeature_param);
}

/**
 * nand_default_get_features - [REPLACEABLE] get NAND chip features
 * @mtd: MTD device structure
 * @chip: nand chip info structure
 * @addr: feature address.
 * @subfeature_param: the subfeature parameters, a four-byte array.
 */
static int nand_default_get_features(struct mtd_info *mtd,
				     struct nand_chip *chip, int addr,
				     uint8_t *subfeature_param)
{
	return nand_get_features_op(chip, addr, subfeature_param);
}
/**
 * nand_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
 * @mtd: MTD device structure
 * @chip: nand chip info structure
 * @addr: feature address.
 * @subfeature_param: the subfeature parameters, a four-byte array.
 *
 * Should be used by NAND controller drivers that do not support the SET/GET
 * FEATURES operations.
 */
int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
				  int addr, u8 *subfeature_param)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL(nand_get_set_features_notsupp);
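
/*
 * Example (illustrative only): a controller driver whose hardware cannot
 * issue SET/GET FEATURES would wire the stub above into its chip hooks,
 * typically before scanning the chip:
 *
 *	chip->set_features = nand_get_set_features_notsupp;
 *	chip->get_features = nand_get_set_features_notsupp;
 */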
/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 */
static int nand_suspend(struct mtd_info *mtd)
{
	return nand_get_device(mtd, FL_PM_SUSPENDED);
}
/**
 * nand_resume - [MTD Interface] Resume the NAND flash
 * @mtd: MTD device structure
 */
static void nand_resume(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (chip->state == FL_PM_SUSPENDED)
		nand_release_device(mtd);
	else
		pr_err("%s called for a chip which is not in suspended state\n",
		       __func__);
}
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *		   prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_get_device(mtd, FL_PM_SUSPENDED);
}
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	unsigned int busw = chip->options & NAND_BUSWIDTH_16;

	/* check for proper chip_delay setup, set 20us if not */
	if (!chip->chip_delay)
		chip->chip_delay = 20;

	/* check if a user-supplied command function was given */
	if (!chip->cmdfunc && !chip->exec_op)
		chip->cmdfunc = nand_command;

	/* check if a user-supplied wait function was given */
	if (chip->waitfunc == NULL)
		chip->waitfunc = nand_wait;

	if (!chip->select_chip)
		chip->select_chip = nand_select_chip;

	/* set for ONFI nand */
	if (!chip->set_features)
		chip->set_features = nand_default_set_features;
	if (!chip->get_features)
		chip->get_features = nand_default_get_features;

	/* If called twice, pointers that depend on busw may need to be reset */
	if (!chip->read_byte || chip->read_byte == nand_read_byte)
		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
	if (!chip->read_word)
		chip->read_word = nand_read_word;
	if (!chip->block_bad)
		chip->block_bad = nand_block_bad;
	if (!chip->block_markbad)
		chip->block_markbad = nand_default_block_markbad;
	if (!chip->write_buf || chip->write_buf == nand_write_buf)
		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
	if (!chip->write_byte || chip->write_byte == nand_write_byte)
		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
	if (!chip->read_buf || chip->read_buf == nand_read_buf)
		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;

	if (!chip->controller) {
		chip->controller = &chip->dummy_controller;
		nand_controller_init(chip->controller);
	}

	if (!chip->buf_align)
		chip->buf_align = 1;
}
/* Sanitize ONFI strings so we can safely print them */
static void sanitize_string(uint8_t *s, size_t len)
{
	/* Null terminate */

	/* Remove non printable chars */
	for (i = 0; i < len - 1; i++) {
		if (s[i] < ' ' || s[i] > 127)
	}

	/* Remove trailing spaces */
}
static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
{
	for (i = 0; i < 8; i++)
		crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
}
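
/*
 * Note: this is the CRC-16 used for the ONFI/JEDEC parameter pages, i.e. the
 * polynomial 0x8005 (x^16 + x^15 + x^2 + 1) applied MSB-first. The callers
 * below run it over the first 254 (ONFI) or 510 (JEDEC) bytes of a parameter
 * page copy and compare the result against the little-endian CRC stored in
 * the final two bytes (p->crc).
 */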
/* Parse the Extended Parameter Page. */
static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
					    struct nand_onfi_params *p)
{
	struct onfi_ext_param_page *ep;
	struct onfi_ext_section *s;
	struct onfi_ext_ecc_info *ecc;

	len = le16_to_cpu(p->ext_param_page_length) * 16;
	ep = kmalloc(len, GFP_KERNEL);

	/* Send our own NAND_CMD_PARAM. */
	ret = nand_read_param_page_op(chip, 0, NULL, 0);

	/* Use the Change Read Column command to skip the ONFI param pages. */
	ret = nand_change_read_column_op(chip,
					 sizeof(*p) * p->num_of_param_pages,
					 ep, len, true);

	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
		!= le16_to_cpu(ep->crc))) {
		pr_debug("fail in the CRC.\n");
	}

	/*
	 * Check the signature.
	 * We do not strictly follow the ONFI spec here; this may change in
	 * the future.
	 */
	if (strncmp(ep->sig, "EPPS", 4)) {
		pr_debug("The signature is invalid.\n");
	}

	/* find the ECC section. */
	cursor = (uint8_t *)(ep + 1);
	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
		s = ep->sections + i;
		if (s->type == ONFI_SECTION_TYPE_2)
			break;
		cursor += s->length * 16;
	}
	if (i == ONFI_EXT_SECTION_MAX) {
		pr_debug("We cannot find the ECC section.\n");
	}

	/* get the info we want. */
	ecc = (struct onfi_ext_ecc_info *)cursor;

	if (!ecc->codeword_size) {
		pr_debug("Invalid codeword size\n");
	}

	chip->ecc_strength_ds = ecc->ecc_bits;
	chip->ecc_step_ds = 1 << ecc->codeword_size;
}
/*
 * Recover data with bit-wise majority
 */
static void nand_bit_wise_majority(const void **srcbufs,
				   unsigned int nsrcbufs,
				   void *dstbuf,
				   unsigned int bufsize)
{
	for (i = 0; i < bufsize; i++) {
		for (j = 0; j < 8; j++) {
			unsigned int cnt = 0;

			for (k = 0; k < nsrcbufs; k++) {
				const u8 *srcbuf = srcbufs[k];

				if (srcbuf[i] & BIT(j))
					cnt++;
			}

			if (cnt > nsrcbufs / 2)
				val |= BIT(j);
		}

		((u8 *)dstbuf)[i] = val;
	}
}
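
/*
 * Example of the recovery above (illustrative): with nsrcbufs = 3 and the
 * three copies of a given byte reading 0b011, 0b010 and 0b110, each bit is
 * taken from the majority of the copies, so the reconstructed byte is 0b010.
 * The result is only trusted if its CRC then matches (see the caller below).
 */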
/*
 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
 */
static int nand_flash_detect_onfi(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_onfi_params *p;
	struct onfi_params *onfi;
	int onfi_version = 0;

	/* Try ONFI for unknown chip or LP */
	ret = nand_readid_op(chip, 0x20, id, sizeof(id));
	if (ret || strncmp(id, "ONFI", 4))

	/* ONFI chip: allocate a buffer to hold its parameter page */
	p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);

	ret = nand_read_param_page_op(chip, 0, NULL, 0);
		goto free_onfi_param_page;

	for (i = 0; i < 3; i++) {
		ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
			goto free_onfi_param_page;

		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
				le16_to_cpu(p->crc)) {
			memcpy(p, &p[i], sizeof(*p));
		}
	}

		const void *srcbufs[3] = {p, p + 1, p + 2};

		pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
		nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
				       sizeof(*p));

		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
				le16_to_cpu(p->crc)) {
			pr_err("ONFI parameter recovery failed, aborting\n");
			goto free_onfi_param_page;
		}

	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->fixup_onfi_param_page)
		chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);

	val = le16_to_cpu(p->revision);
	if (val & ONFI_VERSION_2_3)
	else if (val & ONFI_VERSION_2_2)
	else if (val & ONFI_VERSION_2_1)
	else if (val & ONFI_VERSION_2_0)
	else if (val & ONFI_VERSION_1_0)

	if (!onfi_version) {
		pr_info("unsupported ONFI version: %d\n", val);
		goto free_onfi_param_page;
	}

	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
	sanitize_string(p->model, sizeof(p->model));
	chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
	if (!chip->parameters.model) {
		goto free_onfi_param_page;
	}

	mtd->writesize = le32_to_cpu(p->byte_per_page);

	/*
	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
	 * (don't ask me who thought of this...). MTD assumes that these
	 * dimensions will be power-of-2, so just truncate the remaining area.
	 */
	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
	mtd->erasesize *= mtd->writesize;

	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);

	/* See erasesize comment */
	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
	chip->bits_per_cell = p->bits_per_cell;

	chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
	chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);

	if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
		chip->options |= NAND_BUSWIDTH_16;

	if (p->ecc_bits != 0xff) {
		chip->ecc_strength_ds = p->ecc_bits;
		chip->ecc_step_ds = 512;
	} else if (onfi_version >= 21 &&
		(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {

		/*
		 * nand_flash_detect_ext_param_page() uses the Change Read
		 * Column command, which may not be supported by the current
		 * chip->cmdfunc. So try to update chip->cmdfunc now. We do
		 * not replace a user-supplied command function.
		 */
		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
			chip->cmdfunc = nand_command_lp;

		/* The Extended Parameter Page is supported since ONFI 2.1. */
		if (nand_flash_detect_ext_param_page(chip, p))
			pr_warn("Failed to detect ONFI extended param page\n");
	} else {
		pr_warn("Could not retrieve ONFI ECC requirements\n");
	}

	/* Save some parameters from the parameter page for future use */
	if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
		chip->parameters.supports_set_get_features = true;
		bitmap_set(chip->parameters.get_feature_list,
			   ONFI_FEATURE_ADDR_TIMING_MODE, 1);
		bitmap_set(chip->parameters.set_feature_list,
			   ONFI_FEATURE_ADDR_TIMING_MODE, 1);
	}

	onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);

	onfi->version = onfi_version;
	onfi->tPROG = le16_to_cpu(p->t_prog);
	onfi->tBERS = le16_to_cpu(p->t_bers);
	onfi->tR = le16_to_cpu(p->t_r);
	onfi->tCCS = le16_to_cpu(p->t_ccs);
	onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
	onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
	memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
	chip->parameters.onfi = onfi;

	/* Identification done, free the full ONFI parameter page and exit */

	kfree(chip->parameters.model);
free_onfi_param_page:
}
/*
 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
 */
static int nand_flash_detect_jedec(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_jedec_params *p;
	struct jedec_ecc_info *ecc;
	int jedec_version = 0;

	/* Try JEDEC for unknown chip or LP */
	ret = nand_readid_op(chip, 0x40, id, sizeof(id));
	if (ret || strncmp(id, "JEDEC", sizeof(id)))

	/* JEDEC chip: allocate a buffer to hold its parameter page */
	p = kzalloc(sizeof(*p), GFP_KERNEL);

	ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
		goto free_jedec_param_page;

	for (i = 0; i < 3; i++) {
		ret = nand_read_data_op(chip, p, sizeof(*p), true);
			goto free_jedec_param_page;

		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
				le16_to_cpu(p->crc))
	}

	pr_err("Could not find valid JEDEC parameter page; aborting\n");
	goto free_jedec_param_page;

	val = le16_to_cpu(p->revision);
	else if (val & (1 << 1))
		jedec_version = 1; /* vendor specific version */

	if (!jedec_version) {
		pr_info("unsupported JEDEC version: %d\n", val);
		goto free_jedec_param_page;
	}

	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
	sanitize_string(p->model, sizeof(p->model));
	chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
	if (!chip->parameters.model) {
		goto free_jedec_param_page;
	}

	mtd->writesize = le32_to_cpu(p->byte_per_page);

	/* Please refer to the comment for nand_flash_detect_onfi. */
	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
	mtd->erasesize *= mtd->writesize;

	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);

	/* Please refer to the comment for nand_flash_detect_onfi. */
	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
	chip->bits_per_cell = p->bits_per_cell;

	if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
		chip->options |= NAND_BUSWIDTH_16;

	ecc = &p->ecc_info[0];

	if (ecc->codeword_size >= 9) {
		chip->ecc_strength_ds = ecc->ecc_bits;
		chip->ecc_step_ds = 1 << ecc->codeword_size;
	} else {
		pr_warn("Invalid codeword size\n");
	}

free_jedec_param_page:
}
/*
 * nand_id_has_period - Check if an ID string has a given wraparound period
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the period of repetition
 *
 * Check if an ID string is repeated within a given sequence of bytes at
 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
 * if the repetition has a period of @period; otherwise, returns zero.
 */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	for (i = 0; i < period; i++)
		for (j = i + period; j < arrlen; j += period)
			if (id_data[i] != id_data[j])
}
/*
 * nand_id_len - Get the length of an ID string returned by CMD_READID
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 *
 * Returns the length of the ID string, according to known wraparound/trailing
 * zero patterns. If no pattern exists, returns the length of the array.
 */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int last_nonzero, period;

	/* Find last non-zero byte */
	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
		if (id_data[last_nonzero])

	if (last_nonzero < 0)

	/* Calculate wraparound period */
	for (period = 1; period < arrlen; period++)
		if (nand_id_has_period(id_data, arrlen, period))

	/* There's a repeated pattern */
	if (period < arrlen)

	/* There are trailing zeros */
	if (last_nonzero < arrlen - 1)
		return last_nonzero + 1;

	/* No pattern detected */
}
/* Extract the bits per cell from the 3rd byte of the extended ID */
static int nand_get_bits_per_cell(u8 cellinfo)
{
	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
	bits >>= NAND_CI_CELLTYPE_SHIFT;
}
/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *id_data = chip->id.data;

	/* The 3rd id byte holds MLC / multichip data */
	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);

	/* The 4th id byte is the important one */
	mtd->writesize = 1024 << (extid & 0x03);

	mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);

	/* Calc blocksize. Blocksize is multiples of 64KiB */
	mtd->erasesize = (64 * 1024) << (extid & 0x03);

	/* Get buswidth information */
	chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
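
/*
 * Worked example (illustrative, assuming extid is shifted right by two bits
 * between the fields decoded above, as the generic extended-ID decode does):
 * a 4th ID byte of 0x95 yields writesize = 1024 << 1 = 2048 bytes,
 * oobsize = (8 << 1) * (2048 >> 9) = 64 bytes, erasesize = 64 KiB << 1 =
 * 128 KiB, and an 8-bit bus.
 */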
/*
 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
 * decodes a matching ID table entry and assigns the MTD size parameters for
 * the chip.
 */
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	mtd->erasesize = type->erasesize;
	mtd->writesize = type->pagesize;
	mtd->oobsize = mtd->writesize / 32;

	/* All legacy ID NAND are small-page, SLC */
	chip->bits_per_cell = 1;
}
/*
 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
 * heuristic patterns using various detected parameters (e.g., manufacturer,
 * page size, cell-type information).
 */
static void nand_decode_bbm_options(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Set the bad block position */
	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
	else
		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
}
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
	return type->id_len;
}
static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *id_data = chip->id.data;

	if (!strncmp(type->id, id_data, type->id_len)) {
		mtd->writesize = type->pagesize;
		mtd->erasesize = type->erasesize;
		mtd->oobsize = type->oobsize;

		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		chip->chipsize = (uint64_t)type->chipsize << 20;
		chip->options |= type->options;
		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
		chip->ecc_step_ds = NAND_ECC_STEP(type);
		chip->onfi_timing_mode_default =
					type->onfi_timing_mode_default;

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
	}
}
/*
 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
 * compliant and does not have a full-id or legacy-id entry in the nand_ids
 * table.
 */
static void nand_manufacturer_detect(struct nand_chip *chip)
{
	/*
	 * Try manufacturer detection if available and use
	 * nand_decode_ext_id() otherwise.
	 */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->detect) {
		/* The 3rd id byte holds MLC / multichip data */
		chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
		chip->manufacturer.desc->ops->detect(chip);
	} else {
		nand_decode_ext_id(chip);
	}
}
/*
 * Manufacturer initialization. This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific initialization code in
 * their ->init() hook.
 */
static int nand_manufacturer_init(struct nand_chip *chip)
{
	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
	    !chip->manufacturer.desc->ops->init)
		return 0;

	return chip->manufacturer.desc->ops->init(chip);
}
/*
 * Manufacturer cleanup. This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific cleanup code in their
 * ->cleanup() hook.
 */
static void nand_manufacturer_cleanup(struct nand_chip *chip)
{
	/* Release manufacturer private data */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->cleanup)
		chip->manufacturer.desc->ops->cleanup(chip);
}
/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer *manufacturer;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *id_data = chip->id.data;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);

	/* Select the device */
	chip->select_chip(mtd, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as on some systems bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer = nand_get_manufacturer(maf_id);
	chip->manufacturer.desc = manufacturer;

	type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
		} else if (dev_id == type->dev_id) {
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_flash_detect_onfi(chip);

		/* Check if the chip is JEDEC compliant */
		ret = nand_flash_detect_jedec(chip);
	}

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)

	chip->chipsize = (uint64_t)type->chipsize << 20;

	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check if the bus width is correct. Hardware drivers should
		 * set the chip up correctly!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	if (chip->chipsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;
	chip->erase = single_erase;

	/* Do not replace user supplied command function! */
	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
		chip->cmdfunc = nand_command_lp;

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);

free_detect_allocation:
	kfree(chip->parameters.model);
}
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
	[NAND_ECC_ON_DIE]	= "on-die",
};

static int of_get_nand_ecc_mode(struct device_node *np)
{
	err = of_property_read_string(np, "nand-ecc-mode", &pm);

	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
		if (!strcasecmp(pm, nand_ecc_modes[i]))

	/*
	 * For backward compatibility we support a few obsolete values that
	 * don't have their mappings into nand_ecc_modes_t anymore (they were
	 * merged with other enums).
	 */
	if (!strcasecmp(pm, "soft_bch"))
		return NAND_ECC_SOFT;
}
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
	[NAND_ECC_RS]		= "rs",
};

static int of_get_nand_ecc_algo(struct device_node *np)
{
	err = of_property_read_string(np, "nand-ecc-algo", &pm);
		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
			if (!strcasecmp(pm, nand_ecc_algos[i]))

	/*
	 * For backward compatibility we also read "nand-ecc-mode", checking
	 * for some obsolete values that specified the ECC algorithm.
	 */
	err = of_property_read_string(np, "nand-ecc-mode", &pm);

	if (!strcasecmp(pm, "soft"))
		return NAND_ECC_HAMMING;
	else if (!strcasecmp(pm, "soft_bch"))
		return NAND_ECC_BCH;
}
static int of_get_nand_ecc_step_size(struct device_node *np)
{
	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
	return ret ? ret : val;
}

static int of_get_nand_ecc_strength(struct device_node *np)
{
	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
	return ret ? ret : val;
}

static int of_get_nand_bus_width(struct device_node *np)
{
	if (of_property_read_u32(np, "nand-bus-width", &val))
}

static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
static int nand_dt_init(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	int ecc_mode, ecc_algo, ecc_strength, ecc_step;

	if (of_get_nand_bus_width(dn) == 16)
		chip->options |= NAND_BUSWIDTH_16;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_get_nand_on_flash_bbt(dn))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	ecc_mode = of_get_nand_ecc_mode(dn);
	ecc_algo = of_get_nand_ecc_algo(dn);
	ecc_strength = of_get_nand_ecc_strength(dn);
	ecc_step = of_get_nand_ecc_step_size(dn);

	chip->ecc.mode = ecc_mode;

	chip->ecc.algo = ecc_algo;

	if (ecc_strength >= 0)
		chip->ecc.strength = ecc_strength;

	chip->ecc.size = ecc_step;

	if (of_property_read_bool(dn, "nand-ecc-maximize"))
		chip->ecc.options |= NAND_ECC_MAXIMIZE;
}
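
/*
 * Example (illustrative only) of the device-tree properties parsed by
 * nand_dt_init() above, as they might appear on a NAND chip node. The
 * property names match the strings read by the of_get_nand_*() helpers and
 * of_property_read_*() calls; the values are made up:
 *
 *	nand@0 {
 *		reg = <0>;
 *		nand-bus-width = <8>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "hw";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *	};
 */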
/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase, which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, nand_maf_id, nand_dev_id;

	/* Enforce the right timings for reset/detection */
	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);

	ret = nand_dt_init(chip);

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/*
	 * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
	 * populated.
	 */
	if (!chip->exec_op) {
		/*
		 * Default functions assigned for ->cmdfunc() and
		 * ->select_chip() both expect ->cmd_ctrl() to be populated.
		 */
		if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
			pr_err("->cmd_ctrl() should be provided\n");
		}
	}

	/* Set the default functions */
	nand_set_defaults(chip);

	/* Read the flash type */
	ret = nand_detect(chip, table);
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		chip->select_chip(mtd, -1);

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	chip->select_chip(mtd, -1);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		/* See comment in nand_get_flash_type for reset */
		nand_reset(chip, i);

		chip->select_chip(mtd, i);
		/* Send the command for reading device ID */
		nand_readid_op(chip, 0, id, sizeof(id));
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			chip->select_chip(mtd, -1);
		}
		chip->select_chip(mtd, -1);
	}

	pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	mtd->size = i * chip->chipsize;
}

static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}
static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
		}

		/*
		 * If no ECC placement scheme was provided, pick up the
		 * default large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			/* Always prefer 1k blocks over 512-byte ones */
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->priv = nand_bch_init(mtd);
			WARN(1, "BCH ECC initialization failed!\n");
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
	}
}
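
/*
 * Worked example for the NAND_ECC_MAXIMIZE math above (illustrative, assuming
 * ecc->size has been set to 1024 as the "prefer 1k" comment suggests): for a
 * 4096-byte page with 224 OOB bytes, steps = 4096 / 1024 = 4,
 * bytes = (224 - 2) / 4 = 55 and strength = 55 * 8 / fls(8 * 1024) =
 * 440 / 14 = 31 correctable bits per 1024-byte step.
 */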
/**
 * nand_check_ecc_caps - check the sanity of preset ECC settings
 * @chip: nand chip info structure
 * @caps: ECC caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * When ECC step size and strength are already set, check if they are supported
 * by the controller and the calculated ECC bytes fit within the chip's OOB.
 * On success, the calculated ECC bytes is set.
 */
static int
nand_check_ecc_caps(struct nand_chip *chip,
		    const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int preset_step = chip->ecc.size;
	int preset_strength = chip->ecc.strength;
	int ecc_bytes, nsteps = mtd->writesize / preset_step;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];

		if (stepinfo->stepsize != preset_step)

		for (j = 0; j < stepinfo->nstrengths; j++) {
			if (stepinfo->strengths[j] != preset_strength)

			ecc_bytes = caps->calc_ecc_bytes(preset_step,
							 preset_strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))

			if (ecc_bytes * nsteps > oobavail) {
				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
				       preset_step, preset_strength);
			}

			chip->ecc.bytes = ecc_bytes;
		}
	}

	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
	       preset_step, preset_strength);
}
/**
 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If a chip's ECC requirement is provided, try to meet it with the least
 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
 * On success, the chosen ECC settings are set.
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = chip->ecc_step_ds;
	int req_strength = chip->ecc_strength_ds;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step, best_strength, best_ecc_bytes;
	int best_ecc_bytes_total = INT_MAX;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulting reliability.
			 */
			if (step_size < req_step && strength < req_strength)

			if (mtd->writesize % step_size)

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
			ecc_bytes_total = ecc_bytes * nsteps;

			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;
}
/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can fit
 * within the chip's OOB. On success, the chosen ECC settings are set.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_strength, best_ecc_bytes;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			if (mtd->writesize % step_size)

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))

			if (ecc_bytes * nsteps > oobavail)

			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;
}
/**
 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the ECC configuration according to the following logic:
 *
 * 1. If both ECC step size and ECC strength are already set (usually by DT)
 *    then check if it is supported by this controller.
 * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
 * 3. Otherwise, try to match the ECC step size and ECC strength closest
 *    to the chip's requirement. If available OOB size can't fit the chip
 *    requirement then fallback to the maximum ECC step size and ECC strength.
 *
 * On success, the chosen ECC settings are set.
 */
int nand_ecc_choose_conf(struct nand_chip *chip,
			 const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))

	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, caps, oobavail);

	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
		return nand_maximize_ecc(chip, caps, oobavail);

	if (!nand_match_ecc_req(chip, caps, oobavail))

	return nand_maximize_ecc(chip, caps, oobavail);
}
EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
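
/*
 * Example (illustrative only): a controller driver describing an engine that
 * supports 512-byte steps at strength 4 or 8 and letting the core choose a
 * configuration. The field names follow the nand_ecc_caps/nand_ecc_step_info
 * uses above; the "foo" identifiers and the 13/26-byte cost function are made
 * up for the sketch.
 *
 *	static int foo_calc_ecc_bytes(int step_size, int strength)
 *	{
 *		return strength == 4 ? 13 : 26;
 *	}
 *
 *	static const int foo_strengths[] = { 4, 8 };
 *	static const struct nand_ecc_step_info foo_stepinfo = {
 *		.stepsize = 512,
 *		.strengths = foo_strengths,
 *		.nstrengths = ARRAY_SIZE(foo_strengths),
 *	};
 *	static const struct nand_ecc_caps foo_caps = {
 *		.stepinfos = &foo_stepinfo,
 *		.nstepinfos = 1,
 *		.calc_ecc_bytes = foo_calc_ecc_bytes,
 *	};
 *
 *	ret = nand_ecc_choose_conf(chip, &foo_caps, mtd->oobavail);
 */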
/*
 * Check if the chip configuration meets the datasheet requirements.
 *
 * If our configuration corrects A bits per B bytes and the minimum
 * required correction level is X bits per Y bytes, then we must ensure
 * both of the following are true:
 *
 * (1) A / B >= X / Y
 * (2) A >= X
 *
 * Requirement (1) ensures we can correct for the required bitflip density.
 * Requirement (2) ensures we can correct even when all bitflips are clumped
 * in the same sector.
 */
static bool nand_ecc_strength_good(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (ecc->size == 0 || chip->ecc_step_ds == 0)
		/* Not enough information */

	/*
	 * We get the number of corrected bits per page to compare
	 * the correction density.
	 */
	corr = (mtd->writesize * ecc->strength) / ecc->size;
	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;

	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
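
/*
 * Example of the check above (illustrative): if the datasheet requires 8 bits
 * per 512 bytes (ecc_step_ds = 512, ecc_strength_ds = 8) and the driver is
 * configured for 16 bits per 1024 bytes, then per page corr == ds_corr (same
 * density) and 16 >= 8, so the configuration is considered good; a 4-bit per
 * 512-byte configuration would fail both tests.
 */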
/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
	}

	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the
	 * NAND chip.
	 */
	chip->select_chip(mtd, 0);
	ret = nand_manufacturer_init(chip);
	chip->select_chip(mtd, -1);

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
		switch (mtd->oobsize) {
			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->mode == NAND_ECC_NONE) {
				mtd_set_ooblayout(mtd,
						  &nand_ooblayout_lp_ops);
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			goto err_nand_manuf_cleanup;
		}
	}
	/*
	 * Check ECC mode; default to software ECC if 3-byte/512-byte hardware
	 * ECC is selected and we have a 256-byte page size.
	 */
	switch (ecc->mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handle */
		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc_oob_first;

	case NAND_ECC_HW:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;

	case NAND_ECC_HW_SYNDROME:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			goto err_nand_manuf_cleanup;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				goto err_nand_manuf_cleanup;
			}
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		ecc->algo = NAND_ECC_HAMMING;

	case NAND_ECC_SOFT:
		ret = nand_set_ecc_soft_ops(mtd);
			goto err_nand_manuf_cleanup;

	case NAND_ECC_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			goto err_nand_manuf_cleanup;
		}
		ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
		goto err_nand_manuf_cleanup;
	}
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* propagate ecc info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		goto err_nand_manuf_cleanup;
	}
	ecc->total = ecc->steps * ecc->bytes;
	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_strength_good(mtd))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			mtd->name);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
			mtd->subpage_sft = 1;
			mtd->subpage_sft = 2;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Initialize state */
	chip->state = FL_READY;

	/* Invalidate the pagebuffer reference */

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
	mtd->_erase = nand_erase;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_unlock = NULL;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nand_max_bad_blocks;
	mtd->writebufsize = mtd->writesize;

	/*
	 * Initialize bitflip_threshold to its default prior to the scan_bbt()
	 * call. scan_bbt() might invoke mtd_read(), thus bitflip_threshold
	 * must be properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Initialize the ->data_interface field. */
	ret = nand_init_data_interface(chip);
		goto err_nand_manuf_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < chip->numchips; i++) {
		ret = nand_setup_data_interface(chip, i);
			goto err_nand_manuf_cleanup;
	}

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)

	/* Build bad block table */
	ret = nand_create_bbt(chip);
		goto err_nand_manuf_cleanup;

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);
}
static int nand_attach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->attach_chip)
		return chip->controller->ops->attach_chip(chip);
}

static void nand_detach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->detach_chip)
		chip->controller->ops->detach_chip(chip);
}
/**
 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for. @nand_scan_ident() will not be run
 *	      if this parameter is zero (useful for specific drivers that must
 *	      handle this part of the process themselves, e.g. docg4).
 * @ids: optional flash IDs table
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
 */
int nand_scan_with_ids(struct nand_chip *chip, int maxchips,
		       struct nand_flash_dev *ids)
{
	ret = nand_scan_ident(chip, maxchips, ids);

	ret = nand_attach(chip);

	ret = nand_scan_tail(chip);

	nand_scan_ident_cleanup(chip);
}
EXPORT_SYMBOL(nand_scan_with_ids);
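
/*
 * Example (illustrative only): the usual probe-time sequence in a controller
 * driver once the chip and mtd structures have been set up; error handling is
 * abbreviated and no partition table is passed.
 *
 *	ret = nand_scan_with_ids(chip, 1, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
 *	if (ret)
 *		nand_cleanup(chip);
 *	return ret;
 */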
/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */
void nand_cleanup(struct nand_chip *chip)
{
	if (chip->ecc.mode == NAND_ECC_SOFT &&
	    chip->ecc.algo == NAND_ECC_BCH)
		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

	/* Free bad block table memory */
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_cleanup);
/**
 * nand_release - [NAND Interface] Unregister the MTD device and free resources
 *		  held by the NAND device
 * @chip: NAND chip object
 */
void nand_release(struct nand_chip *chip)
{
	mtd_device_unregister(nand_to_mtd(chip));
}
EXPORT_SYMBOL_GPL(nand_release);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");