// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>
/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_bbt_is_initialized(nand)) {
		unsigned int entry;
		int status;

		entry = nanddev_bbt_pos_to_entry(nand, pos);
		status = nanddev_bbt_get_block_status(nand, entry);
		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if (nand->ops->isbad(nand, pos))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			nanddev_bbt_set_block_status(nand, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);
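
/*
 * Illustrative sketch: counting the bad blocks of a device by walking every
 * eraseblock with the position helpers already used throughout this file.
 * The function name is hypothetical.
 */
static unsigned int example_count_bad_blocks(struct nand_device *nand)
{
	struct nand_pos pos, last;
	unsigned int nbad = 0;

	nanddev_offs_to_pos(nand, 0, &pos);
	nanddev_offs_to_pos(nand, nanddev_size(nand) - 1, &last);
	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		if (nanddev_isbad(nand, &pos))
			nbad++;

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return nbad;
}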
/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Mark a block bad. This function updates the BBT if available and calls the
 * low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	if (nanddev_isbad(nand, pos))
		return 0;

	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = nanddev_bbt_update(nand);

out:
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
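
/*
 * Illustrative sketch: marking a block bad starting from an absolute mtd
 * offset. The wrapper name is hypothetical; real callers (e.g. a layer's
 * mtd->_block_markbad() hook) are also expected to take whatever lock their
 * layer requires, see the note on nanddev_mtd_erase() below.
 */
static int example_markbad_at(struct nand_device *nand, loff_t offs)
{
	struct nand_pos pos;

	/* Translate the absolute offset into a (target, lun, block) position */
	nanddev_offs_to_pos(nand, offs, &pos);

	return nanddev_markbad(nand, &pos);
}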
/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed to by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
	unsigned int entry;
	int status;

	if (!nanddev_bbt_is_initialized(nand))
		return false;

	/* Return info from the table */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	status = nanddev_bbt_get_block_status(nand, entry);
	return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);
/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the block if it's not bad.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
		pr_warn("attempt to erase a bad/reserved block @%llx\n",
			nanddev_pos_to_offs(nand, pos));
		return -EIO;
	}

	return nand->ops->erase(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_erase);
/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * This is a simple mtd->_erase() implementation iterating over all blocks
 * concerned by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. NAND specialized layers should instead
 * implement their own wrapper around nanddev_mtd_erase() taking the
 * appropriate lock before calling nanddev_mtd_erase() (see the sketch after
 * this function).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, last;
	int ret;

	nanddev_offs_to_pos(nand, einfo->addr, &pos);
	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		ret = nanddev_erase(nand, &pos);
		if (ret) {
			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);

			return ret;
		}

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
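
/*
 * Illustrative sketch of the wrapper mentioned above: a specialized layer
 * embeds the nand_device, takes its own lock and then delegates to
 * nanddev_mtd_erase(). All example_* names are hypothetical, and the sketch
 * assumes <linux/mutex.h> is available.
 */
struct example_nand {
	struct nand_device base;
	struct mutex lock;
};

static int example_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct example_nand *enand =
		container_of(mtd_to_nanddev(mtd), struct example_nand, base);
	int ret;

	/* Serialize against other accesses done by this layer */
	mutex_lock(&enand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&enand->lock);

	return ret;
}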
/**
 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks on
 *				  a specific region of the NAND device
 * @mtd: MTD device
 * @offs: offset of the NAND region
 * @len: length of the NAND region
 *
 * Default implementation for mtd->_max_bad_blocks(). Only works if
 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
 *
 * Return: a positive number encoding the maximum number of bad eraseblocks in
 *	   a portion of memory, a negative error code otherwise.
 */
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, end;
	unsigned int max_bb = 0;

	if (!nand->memorg.max_bad_eraseblocks_per_lun)
		return -ENOTSUPP;

	nanddev_offs_to_pos(nand, offs, &pos);
	nanddev_offs_to_pos(nand, offs + len, &end);

	for (nanddev_offs_to_pos(nand, offs, &pos);
	     nanddev_pos_cmp(&pos, &end) < 0;
	     nanddev_pos_next_lun(nand, &pos))
		max_bb += nand->memorg.max_bad_eraseblocks_per_lun;

	return max_bb;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
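
/*
 * Worked example with illustrative numbers: if
 * nand->memorg.max_bad_eraseblocks_per_lun is 20 and the requested region
 * spans two LUNs, the loop above runs twice and the helper returns
 * 2 * 20 = 40, regardless of how much of each LUN the region actually
 * covers.
 */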
/**
 * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
 * @nand: NAND device
 */
static int nanddev_get_ecc_engine(struct nand_device *nand)
{
	int engine_type;

	/* Read the user desires in terms of ECC engine/configuration */
	of_get_nand_ecc_user_config(nand);

	engine_type = nand->ecc.user_conf.engine_type;
	if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		engine_type = nand->ecc.defaults.engine_type;

	switch (engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		return 0;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		nand->ecc.engine = nand_ecc_get_sw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		pr_err("On-host hardware ECC engines not supported yet\n");
		break;
	default:
		pr_err("Missing ECC engine type\n");
	}

	if (!nand->ecc.engine)
		return -EINVAL;

	return 0;
}
/**
 * nanddev_put_ecc_engine() - Detach and put the in-use ECC engine
 * @nand: NAND device
 */
static int nanddev_put_ecc_engine(struct nand_device *nand)
{
	switch (nand->ecc.ctx.conf.engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		pr_err("On-host hardware ECC engines not supported yet\n");
		break;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
	default:
		break;
	}

	return 0;
}
/**
 * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
 * @nand: NAND device
 */
static int nanddev_find_ecc_configuration(struct nand_device *nand)
{
	int ret;

	if (!nand->ecc.engine)
		return -ENOTSUPP;

	ret = nand_ecc_init_ctx(nand);
	if (ret)
		return ret;

	if (!nand_ecc_is_strong_enough(nand))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			nand->mtd.name);

	return 0;
}
/**
 * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
 * @nand: NAND device
 */
int nanddev_ecc_engine_init(struct nand_device *nand)
{
	int ret;

	/* Look for the ECC engine to use */
	ret = nanddev_get_ecc_engine(nand);
	if (ret) {
		pr_err("No ECC engine found\n");
		return ret;
	}

	/* No ECC engine requested */
	if (!nand->ecc.engine)
		return 0;

	/* Configure the engine: balance user input and chip requirements */
	ret = nanddev_find_ecc_configuration(nand);
	if (ret) {
		pr_err("No suitable ECC configuration\n");
		nanddev_put_ecc_engine(nand);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);
/**
 * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
 * @nand: NAND device
 */
void nanddev_ecc_engine_cleanup(struct nand_device *nand)
{
	if (nand->ecc.engine)
		nand_ecc_cleanup_ctx(nand);

	nanddev_put_ecc_engine(nand);
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);
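
/*
 * Illustrative sketch: the two helpers above are meant to be paired by a
 * specialized layer, typically nanddev_ecc_engine_init() in its probe path
 * (after nanddev_init()) and nanddev_ecc_engine_cleanup() in its remove
 * path. The function names below are hypothetical.
 */
static int example_setup_ecc(struct nand_device *nand)
{
	int ret;

	/* Select an engine (none/soft/on-die) and derive its configuration */
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		return ret;

	/*
	 * From here on the layer can issue ECC-protected page accesses using
	 * the configuration stored in nand->ecc.ctx.
	 */
	return 0;
}

static void example_teardown_ecc(struct nand_device *nand)
{
	nanddev_ecc_engine_cleanup(nand);
}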
/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

	if (!nand || !ops)
		return -EINVAL;

	if (!ops->erase || !ops->markbad || !ops->isbad)
		return -EINVAL;

	if (!memorg->bits_per_cell || !memorg->pagesize ||
	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
	    !memorg->planes_per_lun || !memorg->luns_per_target ||
	    !memorg->ntargets)
		return -EINVAL;

	nand->rowconv.eraseblock_addr_shift =
					fls(memorg->pages_per_eraseblock - 1);
	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
				       nand->rowconv.eraseblock_addr_shift;

	nand->ops = ops;

	mtd->type = memorg->bits_per_cell == 1 ?
		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
	mtd->writesize = memorg->pagesize;
	mtd->writebufsize = memorg->pagesize;
	mtd->oobsize = memorg->oobsize;
	mtd->size = nanddev_size(nand);
	mtd->owner = owner;

	return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);
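
/*
 * Illustrative sketch: a minimal driver-side initialization, assuming the
 * driver embeds a nand_device (struct example_nand above) and provides its
 * own nand_ops. All example_* names and geometry values are hypothetical;
 * nanddev_cleanup() below would undo this in the remove path.
 */

/* Low-level hooks: in a real driver these would talk to the controller. */
static int example_op_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	return 0;
}

static int example_op_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	return 0;
}

static bool example_op_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	return false;
}

static const struct nand_ops example_nand_ops = {
	.erase = example_op_erase,
	.markbad = example_op_markbad,
	.isbad = example_op_isbad,
};

static int example_probe(struct example_nand *enand)
{
	struct nand_device *nand = &enand->base;
	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

	/* Describe a hypothetical SLC chip: 2k pages, 64 pages per block */
	memorg->bits_per_cell = 1;
	memorg->pagesize = 2048;
	memorg->oobsize = 64;
	memorg->pages_per_eraseblock = 64;
	memorg->eraseblocks_per_lun = 1024;
	memorg->max_bad_eraseblocks_per_lun = 20;
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;
	memorg->ntargets = 1;

	mutex_init(&enand->lock);

	/* Checks @ops/@memorg consistency, fills the mtd fields, loads the BBT */
	return nanddev_init(nand, &example_nand_ops, THIS_MODULE);
}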
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);

MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");