// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
					  const struct nand_page_io_req *req,
					  u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in MSB just above the column address */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}

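/*
 * Example: on a part with two planes per LUN and a 2048-byte page,
 * fls(2048) = 12, so the plane index is OR'ed into bit 12 of the column
 * address, right above the bits used to address the page + OOB cache.
 */
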
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

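/*
 * Typical usage is a read-modify-write of a single bitfield, e.g.
 * spinand_upd_cfg(spinand, CFG_ECC_ENABLE, CFG_ECC_ENABLE) to set the bit
 * and spinand_upd_cfg(spinand, CFG_ECC_ENABLE, 0) to clear it, which is
 * exactly what spinand_ecc_enable() below does.
 */
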
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip has only one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->spimem, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->spimem, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

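/*
 * spinand_read_from_cache_op() relies on spi_mem_adjust_op_size() to honour
 * controller limits: if, for instance, a controller caps a single transfer at
 * 512 bytes, a 2048 + 64 byte cache read is replayed as several READ FROM
 * CACHE commands, with op.addr.val advanced by the amount actually read.
 */
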
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	void *buf = spinand->databuf;
	unsigned int nbytes;
	u16 column = 0;
	int ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nbytes);
	adjreq.dataoffs = 0;
	adjreq.datalen = nanddev_page_size(nand);
	adjreq.databuf.out = spinand->databuf;
	adjreq.ooblen = nanddev_per_page_oobsize(nand);
	adjreq.ooboffs = 0;
	adjreq.oobbuf.out = spinand->oobbuf;

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * RANDOM LOAD CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;

		ret = spi_mem_adjust_op_size(spinand->spimem, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->spimem, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation might
		 * reset the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}

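/*
 * The write path is split the same way as the read path, except that only the
 * first chunk may use the LOAD CACHE template; subsequent chunks switch to the
 * RANDOM LOAD CACHE template so the data loaded so far is not reset to 0xff.
 */
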
static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(400);
	u8 status;
	int ret;

	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (time_before(jiffies, timeo));

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	return 0;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

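/*
 * Vendors that can report a finer-grained bitflip count may provide an
 * eccinfo.get_status() hook (checked at the top of
 * spinand_check_ecc_status()); the generic decoding above only distinguishes
 * "no bitflips", "some bitflips" and "uncorrectable".
 */
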
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req, false);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

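/*
 * Manufacturer drivers typically list their op variants from fastest to
 * slowest (e.g. quad I/O first, plain single I/O last), so the first variant
 * the controller can use to transfer a full page + OOB, possibly in several
 * chunks, is the one that gets picked here.
 */
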
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID to match against the table entries
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

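/*
 * A manufacturer detect() hook is expected to call spinand_match_and_init()
 * once it has recognized its manufacturer ID, roughly along these lines
 * (sketch only, the table name is the driver's own):
 *
 *	ret = spinand_match_and_init(spinand, vendor_spinand_table,
 *				     ARRAY_SIZE(vendor_spinand_table),
 *				     spinand->id.data[1]);
 *	if (ret)
 *		return ret;
 *
 *	return 1;
 */
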
static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

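/*
 * With this fallback layout the first two OOB bytes stay reserved for the bad
 * block marker and the remaining 62 bytes are reported as free, matching a
 * 64-byte OOB area.
 */
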
static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so let the whole oob
	 * area be available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nand->eccreq.strength;
	mtd->ecc_step_size = nand->eccreq.step_size;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);

	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");