// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

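/*
 * The configuration register is shadowed in spinand->cfg_cache (one entry
 * per target/die), so reads come from memory and writes are skipped when
 * the value does not change.
 */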
static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

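/*
 * Set the Quad Enable (QE) bit when the chip has one and any of the selected
 * cache op templates uses a 4-wire data phase, and clear it otherwise.
 */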
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

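/* Issue a PAGE READ to move the page at req->pos into the chip's cache. */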
static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

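/*
 * Transfer data/OOB from the chip's cache through the per-plane read dirmap,
 * then copy the requested portions into the request buffers, honouring
 * MTD_OPS_AUTO_OOB placement for OOB data.
 */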
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

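/*
 * Stage data and OOB in spinand->databuf and push the whole page + OOB to
 * the chip's cache through the per-plane write dirmap.
 */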
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nbytes);

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

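/*
 * Poll the status register until the BUSY bit clears or a 400ms timeout
 * expires. When @s is non-NULL, the last status value read is returned
 * through it.
 */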
static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(400);
	u8 status;
	int ret;

	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (time_before(jiffies, timeo));

	/*
	 * Extra read, just in case the BUSY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return 0;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

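/*
 * Translate the ECC status bits into a number of corrected bitflips or
 * -EBADMSG, delegating to the vendor ->get_status() hook when one is
 * provided.
 */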
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

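/*
 * MTD ->_read_oob() hook: iterate over the requested range page by page,
 * selecting the right target and enabling on-die ECC unless the access is
 * raw, and accumulate ECC statistics along the way.
 */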
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = 2,
		.ooboffs = 0,
		.oobbuf.in = spinand->oobbuf,
	};

	memset(spinand->oobbuf, 0, 2);
	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req, false);
	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = 2,
		.oobbuf.out = spinand->oobbuf,
	};
	int ret;

	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	spinand_erase_op(spinand, pos);

	memset(spinand->oobbuf, 0, 2);
	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

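/*
 * Create the read/write direct mappings used to stream data to/from the
 * chip's cache for a given plane.
 */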
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

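/*
 * Pick the first op variant (they are typically listed from most to least
 * efficient) that the controller can execute for a full page + OOB transfer,
 * possibly split into several chunks.
 */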
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u16 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

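/*
 * One-time initialization: detect the chip, allocate the page/OOB bounce
 * buffers, seed the config cache, set the QE bit, disable OTP mode, create
 * the dirmaps, unlock all blocks and wire up the MTD hooks.
 */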
static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_manuf_cleanup;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so make the whole OOB area
	 * available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");