// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>
16 * nanddev_isbad() - Check if a block is bad
18 * @pos: position pointing to the block we want to check
20 * Return: true if the block is bad, false otherwise.
22 bool nanddev_isbad(struct nand_device
*nand
, const struct nand_pos
*pos
)
24 if (mtd_check_expert_analysis_mode())
27 if (nanddev_bbt_is_initialized(nand
)) {
31 entry
= nanddev_bbt_pos_to_entry(nand
, pos
);
32 status
= nanddev_bbt_get_block_status(nand
, entry
);
33 /* Lazy block status retrieval */
34 if (status
== NAND_BBT_BLOCK_STATUS_UNKNOWN
) {
35 if (nand
->ops
->isbad(nand
, pos
))
36 status
= NAND_BBT_BLOCK_FACTORY_BAD
;
38 status
= NAND_BBT_BLOCK_GOOD
;
40 nanddev_bbt_set_block_status(nand
, entry
, status
);
43 if (status
== NAND_BBT_BLOCK_WORN
||
44 status
== NAND_BBT_BLOCK_FACTORY_BAD
)
50 return nand
->ops
->isbad(nand
, pos
);
52 EXPORT_SYMBOL_GPL(nanddev_isbad
);
55 * nanddev_markbad() - Mark a block as bad
57 * @pos: position of the block to mark bad
59 * Mark a block bad. This function is updating the BBT if available and
60 * calls the low-level markbad hook (nand->ops->markbad()).
62 * Return: 0 in case of success, a negative error code otherwise.
64 int nanddev_markbad(struct nand_device
*nand
, const struct nand_pos
*pos
)
66 struct mtd_info
*mtd
= nanddev_to_mtd(nand
);
70 if (nanddev_isbad(nand
, pos
))
73 ret
= nand
->ops
->markbad(nand
, pos
);
75 pr_warn("failed to write BBM to block @%llx (err = %d)\n",
76 nanddev_pos_to_offs(nand
, pos
), ret
);
78 if (!nanddev_bbt_is_initialized(nand
))
81 entry
= nanddev_bbt_pos_to_entry(nand
, pos
);
82 ret
= nanddev_bbt_set_block_status(nand
, entry
, NAND_BBT_BLOCK_WORN
);
86 ret
= nanddev_bbt_update(nand
);
90 mtd
->ecc_stats
.badblocks
++;
94 EXPORT_SYMBOL_GPL(nanddev_markbad
);
97 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
99 * @pos: NAND position to test
101 * Checks whether the eraseblock pointed by @pos is reserved or not.
103 * Return: true if the eraseblock is reserved, false otherwise.
105 bool nanddev_isreserved(struct nand_device
*nand
, const struct nand_pos
*pos
)
110 if (!nanddev_bbt_is_initialized(nand
))
113 /* Return info from the table */
114 entry
= nanddev_bbt_pos_to_entry(nand
, pos
);
115 status
= nanddev_bbt_get_block_status(nand
, entry
);
116 return status
== NAND_BBT_BLOCK_RESERVED
;
118 EXPORT_SYMBOL_GPL(nanddev_isreserved
);
121 * nanddev_erase() - Erase a NAND portion
123 * @pos: position of the block to erase
125 * Erases the block if it's not bad.
127 * Return: 0 in case of success, a negative error code otherwise.
129 static int nanddev_erase(struct nand_device
*nand
, const struct nand_pos
*pos
)
131 if (nanddev_isbad(nand
, pos
) || nanddev_isreserved(nand
, pos
)) {
132 pr_warn("attempt to erase a bad/reserved block @%llx\n",
133 nanddev_pos_to_offs(nand
, pos
));
137 return nand
->ops
->erase(nand
, pos
);
141 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
143 * @einfo: erase request
145 * This is a simple mtd->_erase() implementation iterating over all blocks
146 * concerned by @einfo and calling nand->ops->erase() on each of them.
148 * Note that mtd->_erase should not be directly assigned to this helper,
149 * because there's no locking here. NAND specialized layers should instead
150 * implement there own wrapper around nanddev_mtd_erase() taking the
151 * appropriate lock before calling nanddev_mtd_erase().
153 * Return: 0 in case of success, a negative error code otherwise.
155 int nanddev_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*einfo
)
157 struct nand_device
*nand
= mtd_to_nanddev(mtd
);
158 struct nand_pos pos
, last
;
161 nanddev_offs_to_pos(nand
, einfo
->addr
, &pos
);
162 nanddev_offs_to_pos(nand
, einfo
->addr
+ einfo
->len
- 1, &last
);
163 while (nanddev_pos_cmp(&pos
, &last
) <= 0) {
164 ret
= nanddev_erase(nand
, &pos
);
166 einfo
->fail_addr
= nanddev_pos_to_offs(nand
, &pos
);
171 nanddev_pos_next_eraseblock(nand
, &pos
);
176 EXPORT_SYMBOL_GPL(nanddev_mtd_erase
);
179 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblock on
180 * a specific region of the NAND device
182 * @offs: offset of the NAND region
183 * @len: length of the NAND region
185 * Default implementation for mtd->_max_bad_blocks(). Only works if
186 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
188 * Return: a positive number encoding the maximum number of eraseblocks on a
189 * portion of memory, a negative error code otherwise.
191 int nanddev_mtd_max_bad_blocks(struct mtd_info
*mtd
, loff_t offs
, size_t len
)
193 struct nand_device
*nand
= mtd_to_nanddev(mtd
);
194 struct nand_pos pos
, end
;
195 unsigned int max_bb
= 0;
197 if (!nand
->memorg
.max_bad_eraseblocks_per_lun
)
200 nanddev_offs_to_pos(nand
, offs
, &pos
);
201 nanddev_offs_to_pos(nand
, offs
+ len
, &end
);
203 for (nanddev_offs_to_pos(nand
, offs
, &pos
);
204 nanddev_pos_cmp(&pos
, &end
) < 0;
205 nanddev_pos_next_lun(nand
, &pos
))
206 max_bb
+= nand
->memorg
.max_bad_eraseblocks_per_lun
;
210 EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks
);
213 * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
216 static int nanddev_get_ecc_engine(struct nand_device
*nand
)
220 /* Read the user desires in terms of ECC engine/configuration */
221 of_get_nand_ecc_user_config(nand
);
223 engine_type
= nand
->ecc
.user_conf
.engine_type
;
224 if (engine_type
== NAND_ECC_ENGINE_TYPE_INVALID
)
225 engine_type
= nand
->ecc
.defaults
.engine_type
;
227 switch (engine_type
) {
228 case NAND_ECC_ENGINE_TYPE_NONE
:
230 case NAND_ECC_ENGINE_TYPE_SOFT
:
231 nand
->ecc
.engine
= nand_ecc_get_sw_engine(nand
);
233 case NAND_ECC_ENGINE_TYPE_ON_DIE
:
234 nand
->ecc
.engine
= nand_ecc_get_on_die_hw_engine(nand
);
236 case NAND_ECC_ENGINE_TYPE_ON_HOST
:
237 nand
->ecc
.engine
= nand_ecc_get_on_host_hw_engine(nand
);
238 if (PTR_ERR(nand
->ecc
.engine
) == -EPROBE_DEFER
)
239 return -EPROBE_DEFER
;
242 pr_err("Missing ECC engine type\n");
245 if (!nand
->ecc
.engine
)
252 * nanddev_put_ecc_engine() - Dettach and put the in-use ECC engine
255 static int nanddev_put_ecc_engine(struct nand_device
*nand
)
257 switch (nand
->ecc
.ctx
.conf
.engine_type
) {
258 case NAND_ECC_ENGINE_TYPE_ON_HOST
:
259 nand_ecc_put_on_host_hw_engine(nand
);
261 case NAND_ECC_ENGINE_TYPE_NONE
:
262 case NAND_ECC_ENGINE_TYPE_SOFT
:
263 case NAND_ECC_ENGINE_TYPE_ON_DIE
:
272 * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
275 static int nanddev_find_ecc_configuration(struct nand_device
*nand
)
279 if (!nand
->ecc
.engine
)
282 ret
= nand_ecc_init_ctx(nand
);
286 if (!nand_ecc_is_strong_enough(nand
))
287 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
294 * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
297 int nanddev_ecc_engine_init(struct nand_device
*nand
)
301 /* Look for the ECC engine to use */
302 ret
= nanddev_get_ecc_engine(nand
);
304 if (ret
!= -EPROBE_DEFER
)
305 pr_err("No ECC engine found\n");
310 /* No ECC engine requested */
311 if (!nand
->ecc
.engine
)
314 /* Configure the engine: balance user input and chip requirements */
315 ret
= nanddev_find_ecc_configuration(nand
);
317 pr_err("No suitable ECC configuration\n");
318 nanddev_put_ecc_engine(nand
);
325 EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init
);
328 * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
331 void nanddev_ecc_engine_cleanup(struct nand_device
*nand
)
333 if (nand
->ecc
.engine
)
334 nand_ecc_cleanup_ctx(nand
);
336 nanddev_put_ecc_engine(nand
);
338 EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup
);
341 * nanddev_init() - Initialize a NAND device
343 * @ops: NAND device operations
344 * @owner: NAND device owner
346 * Initializes a NAND device object. Consistency checks are done on @ops and
347 * @nand->memorg. Also takes care of initializing the BBT.
349 * Return: 0 in case of success, a negative error code otherwise.
351 int nanddev_init(struct nand_device
*nand
, const struct nand_ops
*ops
,
352 struct module
*owner
)
354 struct mtd_info
*mtd
= nanddev_to_mtd(nand
);
355 struct nand_memory_organization
*memorg
= nanddev_get_memorg(nand
);
360 if (!ops
->erase
|| !ops
->markbad
|| !ops
->isbad
)
363 if (!memorg
->bits_per_cell
|| !memorg
->pagesize
||
364 !memorg
->pages_per_eraseblock
|| !memorg
->eraseblocks_per_lun
||
365 !memorg
->planes_per_lun
|| !memorg
->luns_per_target
||
369 nand
->rowconv
.eraseblock_addr_shift
=
370 fls(memorg
->pages_per_eraseblock
- 1);
371 nand
->rowconv
.lun_addr_shift
= fls(memorg
->eraseblocks_per_lun
- 1) +
372 nand
->rowconv
.eraseblock_addr_shift
;
376 mtd
->type
= memorg
->bits_per_cell
== 1 ?
377 MTD_NANDFLASH
: MTD_MLCNANDFLASH
;
378 mtd
->flags
= MTD_CAP_NANDFLASH
;
379 mtd
->erasesize
= memorg
->pagesize
* memorg
->pages_per_eraseblock
;
380 mtd
->writesize
= memorg
->pagesize
;
381 mtd
->writebufsize
= memorg
->pagesize
;
382 mtd
->oobsize
= memorg
->oobsize
;
383 mtd
->size
= nanddev_size(nand
);
386 return nanddev_bbt_init(nand
);
388 EXPORT_SYMBOL_GPL(nanddev_init
);
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);
403 MODULE_DESCRIPTION("Generic NAND framework");
404 MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
405 MODULE_LICENSE("GPL v2");