// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}
/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}
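
/*
 * Illustrative usage sketch (added, not part of the original source): callers
 * update individual configuration bits through spinand_upd_cfg() so that the
 * cached, per-target register value is reused and unrelated bits are
 * preserved, e.g.:
 *
 *	ret = spinand_upd_cfg(spinand, CFG_ECC_ENABLE, CFG_ECC_ENABLE);
 *
 * Only the bits covered by the mask are rewritten, and spinand_set_cfg()
 * skips the bus transfer entirely when the new value matches the cache.
 */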
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_read_cfg(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int target;
	int ret;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	return 0;
}
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}
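
/*
 * Note (added for clarity): the Quad Enable bit is only set when one of the
 * selected cache-op templates actually transfers data on four lines. For a
 * chip whose chosen read/write variants are all x1 or x2, the QE bit is left
 * cleared even if SPINAND_HAS_QE_BIT is present in the flags.
 */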
static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_cont_read_enable(struct spinand_device *spinand,
				    bool enable)
{
	return spinand->set_cont_read(spinand, enable);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}
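
/*
 * Interpretation sketch (added, hedged): the return value above follows the
 * convention used by the callers in this file: 0 means a clean read, a
 * positive value is the number of corrected bitflips (or a worst-case
 * estimate when the chip only reports a "has bitflips" flag), and -EBADMSG
 * flags an uncorrectable page, e.g.:
 *
 *	ret = spinand_check_ecc_status(spinand, status);
 *	if (ret == -EBADMSG)
 *		mtd->ecc_stats.failed++;
 *	else if (ret > 0)
 *		mtd->ecc_stats.corrected += ret;
 */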
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};
static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}

static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}

static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	ret = spinand_check_ecc_status(spinand, engine_conf->status);
	if (ret == -EBADMSG) {
		mtd->ecc_stats.failed++;
	} else if (ret > 0) {
		unsigned int pages;

		/*
		 * Continuous reads don't allow us to get the detail,
		 * so we may exaggerate the actual number of corrected
		 * bitflips.
		 */
		if (!req->continuous)
			pages = 1;
		else
			pages = req->datalen / nanddev_page_size(nand);

		mtd->ecc_stats.corrected += ret * pages;
	}

	return ret;
}

static const struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};
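
/*
 * Note (added for clarity): this engine is not registered globally; it is
 * handed to the generic NAND ECC core through nand->ecc.ondie_engine in
 * spinand_init() below, and the core then drives it via the
 * prepare_io_req()/finish_io_req() hooks around every page access.
 */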
static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		if (!req->continuous)
			nbytes = nanddev_page_size(nand);
		else
			nbytes = round_up(req->dataoffs + req->datalen,
					  nanddev_page_size(nand));
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	if (req->mode == MTD_OPS_RAW)
		rdesc = spinand->dirmaps[req->pos.plane].rdesc;
	else
		rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;

	if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;

		/*
		 * Dirmap accesses are allowed to toggle the CS.
		 * Toggling the CS during a continuous read is forbidden.
		 */
		if (nbytes && req->continuous)
			return -EIO;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
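
/*
 * Worked example (added, assuming a 2048-byte page): fls(2048) is 12, so for
 * a device with SPINAND_HAS_READ_PLANE_SELECT_BIT the plane index is OR'ed in
 * just above the column address, i.e. plane 1 turns column 0x000 into 0x1000.
 * The same encoding is used for the dirmap offset in spinand_create_dirmap().
 */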
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	if (req->mode == MTD_OPS_RAW)
		wdesc = spinand->dirmaps[req->pos.plane].wdesc;
	else
		wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;

	if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}
static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_wait(struct spinand_device *spinand,
			unsigned long initial_delay_us,
			unsigned long poll_delay_us,
			u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
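
/*
 * Usage sketch (added): callers pass operation-specific delays so the status
 * poll can back off between register reads instead of hammering the bus,
 * e.g. the erase path below does:
 *
 *	ret = spinand_wait(spinand,
 *			   SPINAND_ERASE_INITIAL_DELAY_US,
 *			   SPINAND_ERASE_POLL_DELAY_US,
 *			   &status);
 *
 * and a NULL status pointer is fine when only completion matters.
 */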
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
					 struct mtd_oob_ops *ops,
					 unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	int ret;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG)
			ecc_failed = true;
		else
			*max_bitflips = max_t(unsigned int, *max_bitflips, ret);

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret;
}
static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
					    struct mtd_oob_ops *ops,
					    unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	u8 status;
	int ret;

	ret = spinand_cont_read_enable(spinand, true);
	if (ret)
		return ret;

	/*
	 * The cache is divided into two halves. While one half of the cache
	 * holds the requested data, the other half is loaded with the next
	 * chunk of data. Therefore, the host can read out the data
	 * continuously from page to page. Each data read must be a multiple
	 * of 4 bytes and full pages should be read; otherwise, the data
	 * output might get out of sequence from one read command to another.
	 */
	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			goto end_cont_read;

		ret = nand_ecc_prepare_io_req(nand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_load_page_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
				   SPINAND_READ_POLL_DELAY_US, NULL);
		if (ret < 0)
			goto end_cont_read;

		ret = spinand_read_from_cache_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ops->retlen += iter.req.datalen;

		ret = spinand_read_status(spinand, &status);
		if (ret)
			goto end_cont_read;

		spinand_ondie_ecc_save_status(nand, status);

		ret = nand_ecc_finish_io_req(nand, &iter.req);
		if (ret < 0)
			goto end_cont_read;

		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
		ret = 0;
	}

end_cont_read:
	/*
	 * Once all the data has been read out, the host can either pull CS#
	 * high and wait for tRST or manually clear the bit in the configuration
	 * register to terminate the continuous read operation. We have no
	 * guarantee the SPI controller drivers will effectively deassert the CS
	 * when we expect them to, so take the register based approach.
	 */
	spinand_cont_read_enable(spinand, false);

	return ret;
}
static void spinand_cont_read_init(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;

	/* OOBs cannot be retrieved so external/on-host ECC engine won't work */
	if (spinand->set_cont_read &&
	    (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
	     engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
		spinand->cont_read_possible = true;
	}
}
static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos start_pos, end_pos;

	if (!spinand->cont_read_possible)
		return false;

	/* OOBs won't be retrieved */
	if (ops->ooblen || ops->oobbuf)
		return false;

	nanddev_offs_to_pos(nand, from, &start_pos);
	nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);

	/*
	 * Continuous reads never cross LUN boundaries. Some devices don't
	 * support crossing plane boundaries. Some devices don't even support
	 * crossing block boundaries. The common case being to read through
	 * UBI, we will very rarely read two consecutive blocks or more, so it
	 * is safer and easier (can be improved) to only enable continuous
	 * reads when reading within the same erase block.
	 */
	if (start_pos.target != end_pos.target ||
	    start_pos.plane != end_pos.plane ||
	    start_pos.eraseblock != end_pos.eraseblock)
		return false;

	return start_pos.page < end_pos.page;
}
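
/*
 * Example (added for clarity, assuming 64 pages per block): a read covering
 * pages 0-3 of one block qualifies, since start and end share the same
 * target, plane and eraseblock and end_pos.page is greater than
 * start_pos.page. A read contained in a single page, or one spanning two
 * erase blocks, falls back to the regular page-by-page path.
 */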
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct mtd_ecc_stats old_stats;
	unsigned int max_bitflips = 0;
	int ret;

	mutex_lock(&spinand->lock);

	old_stats = mtd->ecc_stats;

	if (spinand_use_cont_read(mtd, from, ops))
		ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
	else
		ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);

	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	mutex_unlock(&spinand->lock);

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}
static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	if (spinand->cont_read_possible)
		info.length = nanddev_eraseblock_size(nand);

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
		spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
		spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;

		return 0;
	}

	info.op_tmpl = *spinand->op_templates.update_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc_ecc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc_ecc = desc;

	return 0;
}
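
/*
 * Note (added for clarity): the default mapping covers one page plus its OOB
 * area, which is what the regular read/write paths transfer. When continuous
 * reads are possible the mapping is widened to a full erase block so that
 * spinand_read_from_cache_op() can pull several pages through one dirmap
 * access. The *_ecc descriptors only differ when a pipelined ECC engine is
 * involved; otherwise they simply alias the plain descriptors.
 */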
static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&alliancememory_spinand_manufacturer,
	&ato_spinand_manufacturer,
	&esmt_c8_spinand_manufacturer,
	&foresee_spinand_manufacturer,
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
	&xtx_spinand_manufacturer,
};
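
/*
 * Note (added for clarity): adding support for a new vendor boils down to
 * providing a struct spinand_manufacturer (ID byte, chip description table,
 * table size and ops) and listing it here; spinand_manufacturer_match() below
 * walks this array, compares id[0] against each manufacturer ID and then lets
 * spinand_match_and_init() match the remaining ID bytes.
 */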
static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}

	return -EOPNOTSUPP;
}

static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;
		spinand->set_cont_read = table[i].set_cont_read;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}
static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}
static int spinand_init_flash(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret, i;

	ret = spinand_read_cfg(spinand);
	if (ret)
		return ret;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		return ret;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		return ret;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		return ret;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			break;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			break;
	}

	if (ret)
		spinand_manufacturer_cleanup(spinand);

	return ret;
}

static void spinand_mtd_resume(struct mtd_info *mtd)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return;

	ret = spinand_init_flash(spinand);
	if (ret)
		return;

	spinand_ecc_enable(spinand, false);
}
static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_flash(spinand);
	if (ret)
		goto err_free_bufs;

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	/*
	 * Continuous read can only be enabled with an on-die ECC engine, so the
	 * ECC initialization must have happened previously.
	 */
	spinand_cont_read_init(spinand);

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
	mtd->_resume = spinand_mtd_resume;

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_cleanup_ecc_engine;
	}

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);

	return ret;
}
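
/*
 * Summary (added for clarity): the init sequence above is strictly ordered:
 * a DMA-able scratch buffer first (needed by every register access), then
 * device identification, the bounce buffers and per-target config cache, the
 * flash-level setup (quad enable, OTP off, block unlock), nanddev/ECC
 * registration, and finally the dirmap descriptors, whose length depends on
 * whether continuous reads were enabled a few lines earlier.
 */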
static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");