drivers/mtd/nand/spi/core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nbytes);

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
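
/*
 * Poll the status register until the chip clears the BUSY bit or a 400ms
 * timeout expires. The last status value read is optionally returned
 * through @s.
 */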
static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(400);
	u8 status;
	int ret;

	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (time_before(jiffies, timeo));

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = 2,
		.ooboffs = 0,
		.oobbuf.in = spinand->oobbuf,
		.mode = MTD_OPS_RAW,
	};

	memset(spinand->oobbuf, 0, 2);
	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req, false);
	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = 2,
		.oobbuf.out = spinand->oobbuf,
	};
	int ret;

	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	spinand_erase_op(spinand, pos);

	memset(spinand->oobbuf, 0, 2);
	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
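
/*
 * Create the direct mapping descriptors covering the page + OOB area of one
 * plane: one for the write (update cache) path and one for the read path.
 */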
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}
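
/*
 * Pick the first op variant that the underlying SPI controller can use to
 * transfer a full page + OOB, possibly split across several operations.
 */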
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID retrieved through the READ_ID command
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u16 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}
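
/*
 * Illustrative sketch (not part of this file): a manufacturer driver's
 * ->detect() hook typically checks the manufacturer byte of the raw ID and
 * then calls spinand_match_and_init() with its own device table, returning
 * 1 on a match, 0 when the chip is not theirs, or a negative error code.
 * "foo" and SPINAND_MFR_FOO below are placeholders; see micron.c or
 * winbond.c for real examples:
 *
 *	static int foo_spinand_detect(struct spinand_device *spinand)
 *	{
 *		u8 *id = spinand->id.data;
 *		int ret;
 *
 *		if (id[1] != SPINAND_MFR_FOO)
 *			return 0;
 *
 *		ret = spinand_match_and_init(spinand, foo_spinand_table,
 *					     ARRAY_SIZE(foo_spinand_table),
 *					     id[2]);
 *		if (ret)
 *			return ret;
 *
 *		return 1;
 *	}
 */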

static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_manuf_cleanup;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so let the whole OOB
	 * area be available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");